diff --git a/Makefile b/Makefile index a707510..c84cb7d 100644 --- a/Makefile +++ b/Makefile @@ -1,2 +1,5 @@ +GOMOD=$(shell test -f "go.work" && echo "readonly" || echo "vendor") +LDFLAGS=-s -w + cli: - go build -mod vendor -o bin/server cmd/server/main.go + go build -mod $(GOMOD) -ldflags="$(LDFLAGS)" -o bin/server cmd/server/main.go diff --git a/catalog/catalog.go b/catalog/catalog.go index 5eac60b..314c295 100644 --- a/catalog/catalog.go +++ b/catalog/catalog.go @@ -1,2 +1,3 @@ // package catalog provides methods for creating lookup tables of tilepack.MbtilesReader instances. package catalog + diff --git a/catalog/fs.go b/catalog/fs.go index 9dd5fb9..29fad1f 100644 --- a/catalog/fs.go +++ b/catalog/fs.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/tilezen/go-tilepacks/tilepack" "io/fs" - "path/filepath" - "strings" "os" + "path/filepath" "regexp" + "strings" ) // MatchFunc is a custom function to signal whether a given path is a valid MBTiles database path or filename. @@ -22,7 +22,7 @@ func NewCatalogFromFS(db_fs fs.FS) (map[string]tilepack.MbtilesReader, error) { if err != nil { return nil, err } - + return NewCatalogFromFSWithRegexp(db_fs, re) } @@ -32,7 +32,7 @@ func NewCatalogFromFSWithRegexp(db_fs fs.FS, re *regexp.Regexp) (map[string]tile match_func := func(path string) (bool, error) { - if re.MatchString(path){ + if re.MatchString(path) { return true, nil } @@ -47,7 +47,7 @@ func NewCatalogFromFSWithRegexp(db_fs fs.FS, re *regexp.Regexp) (map[string]tile func NewCatalogFromFSWithMatchFunc(db_fs fs.FS, match_func MatchFunc) (map[string]tilepack.MbtilesReader, error) { files := make([]string, 0) - + walk_fn := func(path string, d fs.DirEntry, err error) error { if err != nil { @@ -74,7 +74,7 @@ func NewCatalogFromFSWithMatchFunc(db_fs fs.FS, match_func MatchFunc) (map[strin } catalog := make(map[string]tilepack.MbtilesReader) - + for _, path := range files { fname := filepath.Base(path) @@ -83,21 +83,21 @@ func NewCatalogFromFSWithMatchFunc(db_fs fs.FS, match_func MatchFunc) (map[strin layer := strings.Replace(fname, ext, "", -1) // START OF things we probably shouldn't be doing - + var abs_path string str_type := fmt.Sprintf("%T", db_fs) - + switch str_type { case "os.dirFS": - root_fs := fmt.Sprintf("%s", db_fs) // SEE THIS? It totally breaks the fs.FS contract + root_fs := fmt.Sprintf("%s", db_fs) // SEE THIS? It totally breaks the fs.FS contract abs_path = filepath.Join(root_fs, path) default: // Create tmp files and io.Copy the data in to place // Note the absence of any good way to clean up that tmp data // yet... 
- + return nil, fmt.Errorf("Unsupport fs.FS type '%T'", db_fs) } @@ -112,7 +112,7 @@ func NewCatalogFromFSWithMatchFunc(db_fs fs.FS, match_func MatchFunc) (map[strin } // END OF things we probably shouldn't be doing - + r, err := tilepack.NewMbtilesReader(abs_path) if err != nil { diff --git a/cmd/server/main.go b/cmd/server/main.go index ccfd556..d51feae 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -23,7 +23,7 @@ func main() { fmt.Fprintf(os.Stderr, "Options:\n") flag.PrintDefaults() } - + flag.Parse() ctx := context.Background() diff --git a/go.mod b/go.mod index 730fbb3..3899b17 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,21 @@ module github.com/sfomuseum/go-mbtiles-server -go 1.16 +go 1.22 require ( - github.com/aaronland/go-http-server v0.0.7 + github.com/aaronland/go-http-server v1.4.1 github.com/aaronland/go-mimetypes v0.0.2 - github.com/tilezen/go-tilepacks v0.0.0-20210818174005-d960bc00ff89 + github.com/paulmach/orb v0.11.1 + github.com/tilezen/go-tilepacks v0.0.0-20230902185537-9a48755ab988 +) + +require ( + github.com/aaronland/go-roster v1.0.0 // indirect + github.com/aaronland/go-string v1.0.0 // indirect + github.com/akrylysov/algnhsa v1.1.0 // indirect + github.com/aws/aws-lambda-go v1.46.0 // indirect + github.com/aws/aws-sdk-go v1.44.65 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/mattn/go-sqlite3 v1.14.14 // indirect + go.mongodb.org/mongo-driver v1.11.4 // indirect ) diff --git a/go.sum b/go.sum index a812462..39856d0 100644 --- a/go.sum +++ b/go.sum @@ -1,53 +1,119 @@ -github.com/aaronland/go-http-server v0.0.7 h1:hKY2e/WLkUhaeEx4KZW2iRcBfsSZqAV/wfLLRQcrECM= -github.com/aaronland/go-http-server v0.0.7/go.mod h1:6dtsZDrQG0XvUOCL0eaHsUw+7gw3Xa/Nm+XofvkSw2w= -github.com/aaronland/go-mimetypes v0.0.1 h1:O5rZ58U5d8n5SCHw12FdYNYCk1Ay4nz9Nx4P9SS/i2U= -github.com/aaronland/go-mimetypes v0.0.1/go.mod h1:bFXDLXp+3bRrzRhoD7RY2vSKTcsVjgdSckHxbuIdTLA= +github.com/aaronland/go-http-server v1.4.1 h1:GfrGHF9+aurisbPAVv1SbPhxu+PJppX4Eatig/dE8D4= +github.com/aaronland/go-http-server v1.4.1/go.mod h1:R52A7/N3HWHmAf4KjVgkAtsFyUl+VxzICyArko3zMKo= github.com/aaronland/go-mimetypes v0.0.2 h1:Gsi1ak00V+m+g8lVIROkM2mIdj7aLnvaF1iqfVkhHHc= github.com/aaronland/go-mimetypes v0.0.2/go.mod h1:bFXDLXp+3bRrzRhoD7RY2vSKTcsVjgdSckHxbuIdTLA= -github.com/aaronland/go-roster v0.0.2 h1:2Fu7v4VQLRLRL/Zgr6R9S5JxsW75Ab/K88QtMVX532s= -github.com/aaronland/go-roster v0.0.2/go.mod h1:AcovpxlG1XxJxX2Fjqlm63fEIBhCjEIBV4lP87FZDmI= -github.com/aaronland/go-string v0.1.2 h1:RSr/mQNbLgF37H0RV+nF7j2kILRRFkCmr8Jwq4lw92k= -github.com/aaronland/go-string v0.1.2/go.mod h1:2aMIWdTqk63jZsaLLy+p9dsB1MDRqx4sHYoLtkwyYUo= -github.com/akrylysov/algnhsa v0.0.0-20190319020909-05b3d192e9a7/go.mod h1:HhzjNA0EjUWcwHTUMwqrpeAdIF3gRmpH0HpWx1hYJSc= -github.com/aws/aws-lambda-go v1.9.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= -github.com/aws/aws-lambda-go v1.10.0 h1:uafgdfYGQD0UeT7d2uKdyWW8j/ZYRifRPIdmeqLzLCk= -github.com/aws/aws-lambda-go v1.10.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= -github.com/aws/aws-sdk-go v1.38.51 h1:aKQmbVbwOCuQSd8+fm/MR3bq0QOsu9Q7S+/QEND36oQ= -github.com/aws/aws-sdk-go v1.38.51/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.40.24 h1:qtXDYFzAxEmmZaa+4JA9loBqOujO0vm4ZOJoEmjG21E= -github.com/aws/aws-sdk-go v1.40.24/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aaronland/go-roster v1.0.0 h1:FRDGrTqsYySKjWnAhbBGXyeGlI/o5/t9FZYCbUmyQtI= +github.com/aaronland/go-roster v1.0.0/go.mod 
h1:KIsYZgrJlAsyb9LsXSCvlqvbcCBVjCSqcQiZx42i9ro= +github.com/aaronland/go-string v1.0.0 h1:fPHmC1i9JhGzgi4qdCSXKc4xTCaLLe0uvlJMu9dHhco= +github.com/aaronland/go-string v1.0.0/go.mod h1:URh3Au/fNbM0++WjBseurE3QTp875wiJ9ImrecD+7tI= +github.com/akrylysov/algnhsa v1.1.0 h1:G0SoP16tMRyiism7VNc3JFA0wq/cVgEkp/ExMVnc6PQ= +github.com/akrylysov/algnhsa v1.1.0/go.mod h1:+bOweRs/WBu5awl+ifCoSYAuKVPAmoTk8XOMrZ1xwiw= +github.com/aws/aws-lambda-go v1.46.0 h1:UWVnvh2h2gecOlFhHQfIPQcD8pL/f7pVCutmFl+oXU8= +github.com/aws/aws-lambda-go v1.46.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= +github.com/aws/aws-sdk-go v1.44.65 h1:G+kuQ0+kcg8ltLZqju3OA9NDtGsGuSDrNWaXwgYFEH8= +github.com/aws/aws-sdk-go v1.44.65/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= -github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU= -github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= 
+github.com/paulmach/orb v0.9.0/go.mod h1:SudmOk85SXtmXAB3sLGyJ6tZy/8pdfrV0o6ef98Xc30= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/schollz/progressbar/v3 v3.13.0/go.mod h1:ZBYnSuLAX2LU8P8UiKN/KgF2DY58AJC8yfVYLPC8Ly4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tilezen/go-tilepacks v0.0.0-20210602151652-0147f7fb6fd7 h1:F7fy7uOoKFKjNLfqb5Eraed3WWyGLpWkg/4X7yrPMPg= -github.com/tilezen/go-tilepacks v0.0.0-20210602151652-0147f7fb6fd7/go.mod h1:hTW8qUKLnA2bzSdLJ5Z4iqYbcsp70PIA8+N6zEwyTRI= -github.com/tilezen/go-tilepacks v0.0.0-20210818174005-d960bc00ff89 h1:s0ihTlSC5aNkA8YNrLivsGZ6Ol0bAkB9sDRnbkE9xjM= -github.com/tilezen/go-tilepacks v0.0.0-20210818174005-d960bc00ff89/go.mod h1:rDT2rySxZvKbtwbFieq33/hURjbejX0biQzAU80XBwc= -github.com/whosonfirst/algnhsa v0.1.0 h1:8wVksKaz1qor2Y6+fiLEPEPxsEEvUQgSdtSTQLMrnlA= -github.com/whosonfirst/algnhsa v0.1.0/go.mod h1:swLBXxaVTv3s6dJLhekdQCuCTshUew+xHjptRC21RG0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tilezen/go-tilepacks v0.0.0-20230902185537-9a48755ab988 h1:VEptU2NK9WL1Ghrmc4WB4rLeq02h1cBTcCy//H7MOY0= +github.com/tilezen/go-tilepacks v0.0.0-20230902185537-9a48755ab988/go.mod h1:ujzM/j2qqURqJn+p7oyBccMPyZCOviBf1xo/5QAXiOE= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/http/handler.go b/http/handler.go index 9a5dab8..100ce62 100644 --- a/http/handler.go +++ b/http/handler.go @@ -1,11 +1,12 @@ package http import ( - "github.com/sfomuseum/go-mbtiles-server/tile" - "github.com/tilezen/go-tilepacks/tilepack" - "log" + "log/slog" gohttp "net/http" "strconv" + + "github.com/sfomuseum/go-mbtiles-server/tile" + "github.com/tilezen/go-tilepacks/tilepack" ) // MBTilesHandler will return a http.HandlerFunc handler for serving tile requests from 'catalog' using the @@ -27,17 +28,24 @@ func MBTilesHandlerWithParser(catalog map[string]tilepack.MbtilesReader, p tile. fn := func(rsp gohttp.ResponseWriter, req *gohttp.Request) { + logger := slog.Default() + logger = logger.With("path", req.URL.Path) + path := req.URL.Path tile_req, err := p.Parse(path) if err != nil { + logger.Error("Failed to parse tile path", "error", err) gohttp.NotFound(rsp, req) return } + logger = logger.With("layer", tile_req.Layer) + reader, ok := catalog[tile_req.Layer] if !ok { + logger.Error("Tile layer not found") gohttp.NotFound(rsp, req) return } @@ -45,19 +53,22 @@ func MBTilesHandlerWithParser(catalog map[string]tilepack.MbtilesReader, p tile. 
result, err := reader.GetTile(tile_req.Tile) if err != nil { - log.Printf("Error getting tile: %+v", err) + logger.Error("Failed to retrieve tile", "error", err) gohttp.NotFound(rsp, req) return } if result.Data == nil { + logger.Debug("Tile data is nil") gohttp.NotFound(rsp, req) return } l := len(*result.Data) str_l := strconv.Itoa(l) - + + logger.Debug("Serve tile", "content type", tile_req.ContentType, "length", l) + rsp.Header().Set("Content-Type", tile_req.ContentType) rsp.Header().Set("Content-Length", str_l) rsp.Write(*result.Data) diff --git a/tile/parser.go b/tile/parser.go index 5a44859..28cbf93 100644 --- a/tile/parser.go +++ b/tile/parser.go @@ -2,10 +2,11 @@ package tile import ( "fmt" - "github.com/tilezen/go-tilepacks/tilepack" - "github.com/aaronland/go-mimetypes" "regexp" "strconv" + + "github.com/aaronland/go-mimetypes" + "github.com/paulmach/orb/maptile" ) // TileParser is an interface for parsing URI paths in to *TileRequest instances. @@ -65,18 +66,14 @@ func (p *SimpleTileParser) Parse(path string) (*TileRequest, error) { if len(t) == 0 { return nil, fmt.Errorf("Unsupported extension '%s'", ext) } - + content_type := t[0] - + z, _ := strconv.ParseUint(match[2], 10, 32) x, _ := strconv.ParseUint(match[3], 10, 32) y, _ := strconv.ParseUint(match[4], 10, 32) - tile := &tilepack.Tile{ - Z: uint(z), - X: uint(x), - Y: uint(y), - } + tile := maptile.New(uint32(x), uint32(y), maptile.Zoom(z)) tile_req := &TileRequest{ Tile: tile, diff --git a/tile/request.go b/tile/request.go index 453a79b..90ae9c1 100644 --- a/tile/request.go +++ b/tile/request.go @@ -2,28 +2,29 @@ package tile import ( "fmt" - "github.com/tilezen/go-tilepacks/tilepack" - "github.com/aaronland/go-mimetypes" + + "github.com/aaronland/go-mimetypes" + "github.com/paulmach/orb/maptile" ) // TileRequest provides a simple struct encapsulating the specific details of a tile request. type TileRequest struct { // Tile is the underlying *tilepack.Tile instance where tile data is stored - Tile *tilepack.Tile + Tile maptile.Tile // Layer is the name of the MBTiles to read from - Layer string + Layer string // ContentType is the mime-type of the data to return ContentType string } // String returns a relative URI path for the TileRequest instance. func (t *TileRequest) String() string { - + ext := mimetypes.ExtensionsByType(t.ContentType) if len(ext) == 0 { - ext = []string{ ".???" 
} + ext = []string{".???"} } - + return fmt.Sprintf("%s/%d/%d/%d.%s", t.Layer, t.Tile.Z, t.Tile.X, t.Tile.Y, ext[0]) } diff --git a/vendor/github.com/aaronland/go-http-server/.gitignore b/vendor/github.com/aaronland/go-http-server/.gitignore index 9c54652..12a69f3 100644 --- a/vendor/github.com/aaronland/go-http-server/.gitignore +++ b/vendor/github.com/aaronland/go-http-server/.gitignore @@ -1,2 +1,5 @@ *~ -*.pem \ No newline at end of file +*.pem +bin +.DS_Store +*.zip \ No newline at end of file diff --git a/vendor/github.com/aaronland/go-http-server/Makefile b/vendor/github.com/aaronland/go-http-server/Makefile new file mode 100644 index 0000000..f438209 --- /dev/null +++ b/vendor/github.com/aaronland/go-http-server/Makefile @@ -0,0 +1,8 @@ +GOMOD=vendor + +lambda-example: + if test -f main; then rm -f main; fi + if test -f example.zip; then rm -f example.zip; fi + GOOS=linux go build -mod $(GOMOD) -ldflags="-s -w" -o main cmd/example/main.go + zip example.zip main + rm -f main diff --git a/vendor/github.com/aaronland/go-http-server/README.md b/vendor/github.com/aaronland/go-http-server/README.md index 1703b9f..406a847 100644 --- a/vendor/github.com/aaronland/go-http-server/README.md +++ b/vendor/github.com/aaronland/go-http-server/README.md @@ -1,5 +1,13 @@ # go-http-server +Go package to provide interfaces and implementation for HTTP servers. + +It is mostly a syntactic wrapper around existing HTTP server implementation with a common interface configured using a URI-based syntax. + +## Documentation + +[![Go Reference](https://pkg.go.dev/badge/github.com/aaronland/go-http-server.svg)](https://pkg.go.dev/github.com/aaronland/go-http-server) + ## Example _Error hanldling has been removed for the sake of brevity._ @@ -90,23 +98,14 @@ func (s *HTTPServer) ListenAndServe(ctx context.Context, mux *http.ServeMux) err } ``` -## Interfaces - -### Server - -``` -type Server interface { - ListenAndServe(context.Context, *http.ServeMux) error - Address() string -} - -type ServerInitializeFunc func(context.Context, string) (Server, error) -``` - ## Server schemes The following schemes/implementations are included by default with this package. +### functionurl:// + +An AWS Lambda Function URL compatible HTTP server. + ### http://{HOST} A standard, plain-vanilla, HTTP server. @@ -117,7 +116,7 @@ This is an alias to the `tls://` scheme. ### lambda:// -An AWS-compatible HTTP server. +An AWS Lambda function + API Gateway compatible HTTP server. ### mkcert://{HOST} @@ -129,4 +128,5 @@ A standard, plain-vanilla, HTTPS/TLS server. You must provide TLS certificate an ## See also -* https://github.com/FiloSottile/mkcert \ No newline at end of file +* https://github.com/akrylysov/algnhsa +* https://github.com/FiloSottile/mkcert diff --git a/vendor/github.com/aaronland/go-http-server/doc.go b/vendor/github.com/aaronland/go-http-server/doc.go new file mode 100644 index 0000000..585dfb4 --- /dev/null +++ b/vendor/github.com/aaronland/go-http-server/doc.go @@ -0,0 +1,4 @@ +// package server provides interfaces and implementation for HTTP servers. It is mostly +// a syntactic wrapper around existing HTTP server implementation with a common interface +// configured using a URI-based syntax. 
+package server diff --git a/vendor/github.com/aaronland/go-http-server/go.mod b/vendor/github.com/aaronland/go-http-server/go.mod deleted file mode 100644 index 1d2f23e..0000000 --- a/vendor/github.com/aaronland/go-http-server/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/aaronland/go-http-server - -go 1.12 - -require ( - github.com/aaronland/go-roster v0.0.2 - github.com/whosonfirst/algnhsa v0.1.0 -) diff --git a/vendor/github.com/aaronland/go-http-server/go.sum b/vendor/github.com/aaronland/go-http-server/go.sum deleted file mode 100644 index 31f55a9..0000000 --- a/vendor/github.com/aaronland/go-http-server/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/aaronland/go-roster v0.0.1 h1:r1l4n1HfWvEtOXZvhfXX0Won9Xf6QJsigdUdzOtuy/M= -github.com/aaronland/go-roster v0.0.1/go.mod h1:AcovpxlG1XxJxX2Fjqlm63fEIBhCjEIBV4lP87FZDmI= -github.com/aaronland/go-roster v0.0.2 h1:2Fu7v4VQLRLRL/Zgr6R9S5JxsW75Ab/K88QtMVX532s= -github.com/aaronland/go-roster v0.0.2/go.mod h1:AcovpxlG1XxJxX2Fjqlm63fEIBhCjEIBV4lP87FZDmI= -github.com/akrylysov/algnhsa v0.0.0-20190319020909-05b3d192e9a7/go.mod h1:HhzjNA0EjUWcwHTUMwqrpeAdIF3gRmpH0HpWx1hYJSc= -github.com/aws/aws-lambda-go v1.9.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= -github.com/aws/aws-lambda-go v1.10.0 h1:uafgdfYGQD0UeT7d2uKdyWW8j/ZYRifRPIdmeqLzLCk= -github.com/aws/aws-lambda-go v1.10.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/whosonfirst/algnhsa v0.1.0 h1:8wVksKaz1qor2Y6+fiLEPEPxsEEvUQgSdtSTQLMrnlA= -github.com/whosonfirst/algnhsa v0.1.0/go.mod h1:swLBXxaVTv3s6dJLhekdQCuCTshUew+xHjptRC21RG0= diff --git a/vendor/github.com/aaronland/go-http-server/http.go b/vendor/github.com/aaronland/go-http-server/http.go index cc42b17..95ba9bd 100644 --- a/vendor/github.com/aaronland/go-http-server/http.go +++ b/vendor/github.com/aaronland/go-http-server/http.go @@ -22,8 +22,10 @@ import ( func init() { ctx := context.Background() RegisterServer(ctx, "http", NewHTTPServer) + RegisterServer(ctx, "https", NewHTTPServer) } +// HTTPServer implements the `Server` interface for a basic `net/http` server. type HTTPServer struct { Server url *url.URL @@ -32,6 +34,19 @@ type HTTPServer struct { key string } +// NewHTTPServer returns a new `HTTPServer` instance configured by 'uri' which is +// expected to be defined in the form of: +// +// {SCHEME}://{ADDRESS}:{PORT}?{PARAMETERS} +// +// Where {SCHEME} is either 'http' or 'https'; {ADDRESS} and {PORT} are the address +// and port to listen for requests on. Valid parameters are: +// * `tls_cert={CERTIFICATE}` The path for a TLS certificate to use; required if {SCHEME} is 'https'. +// * `tls_key={KEY}` The path for a TLS key to use; required if {SCHEME} is 'https' +// * `read_timeout={SECONDS}` A custom setting for HTTP read timeouts. Default is 2 seconds. +// * `write_timeout={SECONDS}` A custom setting for HTTP write timeouts. Default is 10 seconds. +// * `idle_timeout={SECONDS}` A custom setting for HTTP idle timeouts. Default is 15 seconds. +// * `header_timeout={SECONDS}` A custom setting for HTTP header timeouts. Default is 2 seconds. 
func NewHTTPServer(ctx context.Context, uri string) (Server, error) { u, err := url.Parse(uri) @@ -138,6 +153,7 @@ func NewHTTPServer(ctx context.Context, uri string) (Server, error) { return &server, nil } +// Address returns the fully-qualified URI where the server instance can be contacted. func (s *HTTPServer) Address() string { u, _ := url.Parse(s.url.String()) @@ -146,6 +162,7 @@ func (s *HTTPServer) Address() string { return u.String() } +// ListenAndServe starts the server and listens for requests using 'mux' for routing. func (s *HTTPServer) ListenAndServe(ctx context.Context, mux http.Handler) error { idleConnsClosed := make(chan struct{}) diff --git a/vendor/github.com/aaronland/go-http-server/lambda.go b/vendor/github.com/aaronland/go-http-server/lambda_apigateway.go similarity index 57% rename from vendor/github.com/aaronland/go-http-server/lambda.go rename to vendor/github.com/aaronland/go-http-server/lambda_apigateway.go index 2e842ea..5c36911 100644 --- a/vendor/github.com/aaronland/go-http-server/lambda.go +++ b/vendor/github.com/aaronland/go-http-server/lambda_apigateway.go @@ -2,10 +2,12 @@ package server import ( "context" - "github.com/whosonfirst/algnhsa" + "fmt" _ "log" "net/http" "net/url" + + "github.com/akrylysov/algnhsa" ) func init() { @@ -13,18 +15,26 @@ func init() { RegisterServer(ctx, "lambda", NewLambdaServer) } +// LambdaServer implements the `Server` interface for a use in a AWS Lambda + API Gateway context. type LambdaServer struct { Server url *url.URL binary_types []string } +// NewLambdaServer returns a new `LambdaServer` instance configured by 'uri' which is +// expected to be defined in the form of: +// +// lambda://?{PARAMETERS} +// +// Valid parameters are: +// * `binary_type={MIMETYPE}` One or more mimetypes to be served by AWS API Gateway as binary content types. func NewLambdaServer(ctx context.Context, uri string) (Server, error) { u, err := url.Parse(uri) if err != nil { - return nil, err + return nil, fmt.Errorf("Failed to parse URI, %w", err) } server := LambdaServer{ @@ -42,10 +52,12 @@ func NewLambdaServer(ctx context.Context, uri string) (Server, error) { return &server, nil } +// Address returns the fully-qualified URL used to instantiate 's'. func (s *LambdaServer) Address() string { return s.url.String() } +// ListenAndServe starts the serve and listens for requests using 'mux' for routing. func (s *LambdaServer) ListenAndServe(ctx context.Context, mux http.Handler) error { lambda_opts := new(algnhsa.Options) diff --git a/vendor/github.com/aaronland/go-http-server/lambda_functionurl.go b/vendor/github.com/aaronland/go-http-server/lambda_functionurl.go new file mode 100644 index 0000000..280c806 --- /dev/null +++ b/vendor/github.com/aaronland/go-http-server/lambda_functionurl.go @@ -0,0 +1,170 @@ +package server + +// https://github.com/aws/aws-lambda-go/blob/main/events/README_Lambda.md + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" +) + +func init() { + ctx := context.Background() + RegisterServer(ctx, "functionurl", NewLambdaFunctionURLServer) +} + +// LambdaFunctionURLServer implements the `Server` interface for a use in a AWS LambdaFunctionURL + API Gateway context. 
+type LambdaFunctionURLServer struct { + Server + handler http.Handler + binaryContentTypes map[string]bool +} + +// NewLambdaFunctionURLServer returns a new `LambdaFunctionURLServer` instance configured by 'uri' which is +// expected to be defined in the form of: +// +// functionurl://?{PARAMETERS} +// +// Valid parameters are: +// * `binary_type={MIMETYPE}` One or more mimetypes to be served by AWS FunctionURLs as binary content types. +func NewLambdaFunctionURLServer(ctx context.Context, uri string) (Server, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, fmt.Errorf("Failed to parse URI, %w", err) + } + + q := u.Query() + + binary_types := make(map[string]bool) + + for _, t := range q["binary_type"] { + binary_types[t] = true + } + + server := LambdaFunctionURLServer{ + binaryContentTypes: binary_types, + } + + return &server, nil +} + +// Address returns the fully-qualified URL used to instantiate 's'. +func (s *LambdaFunctionURLServer) Address() string { + return "functionurl://" +} + +// ListenAndServe starts the serve and listens for requests using 'mux' for routing. +func (s *LambdaFunctionURLServer) ListenAndServe(ctx context.Context, mux http.Handler) error { + s.handler = mux + lambda.Start(s.handleRequest) + return nil +} + +func (s *LambdaFunctionURLServer) handleRequest(ctx context.Context, request events.LambdaFunctionURLRequest) (events.LambdaFunctionURLResponse, error) { + + req, err := newHTTPRequest(ctx, request) + + if err != nil { + return events.LambdaFunctionURLResponse{Body: err.Error(), StatusCode: 500}, nil + } + + rec := httptest.NewRecorder() + s.handler.ServeHTTP(rec, req) + + rsp := rec.Result() + + event_rsp_headers := make(map[string]string) + + for k, v := range rsp.Header { + event_rsp_headers[k] = strings.Join(v, ",") + } + + event_rsp := events.LambdaFunctionURLResponse{ + StatusCode: rsp.StatusCode, + Headers: event_rsp_headers, + } + + content_type := rsp.Header.Get("Content-Type") + + if s.binaryContentTypes[content_type] { + event_rsp.Body = base64.StdEncoding.EncodeToString(rec.Body.Bytes()) + event_rsp.IsBase64Encoded = true + } else { + event_rsp.Body = rec.Body.String() + } + + return event_rsp, nil +} + +// This was clone and modified as necessary from https://github.com/akrylysov/algnhsa/blob/master/request.go#L30 +// so there may still be issues. + +// https://docs.aws.amazon.com/lambda/latest/dg/urls-invocation.html + +func newHTTPRequest(ctx context.Context, event events.LambdaFunctionURLRequest) (*http.Request, error) { + + // https://pkg.go.dev/github.com/aws/aws-lambda-go/events#LambdaFunctionURLRequest + // https://pkg.go.dev/github.com/aws/aws-lambda-go/events#LambdaFunctionURLRequestContextHTTPDescription + + rawQuery := event.RawQueryString + + if len(rawQuery) == 0 { + + params := url.Values{} + + for k, v := range event.QueryStringParameters { + params.Set(k, v) + } + + rawQuery = params.Encode() + } + + headers := make(http.Header) + + for k, v := range event.Headers { + headers.Set(k, v) + } + + unescapedPath, err := url.PathUnescape(event.RawPath) + + if err != nil { + return nil, err + } + u := url.URL{ + Host: headers.Get("Host"), + Path: unescapedPath, + RawQuery: rawQuery, + } + + // Handle base64 encoded body. 
+ + var body io.Reader = strings.NewReader(event.Body) + + if event.IsBase64Encoded { + body = base64.NewDecoder(base64.StdEncoding, body) + } + + req_context := event.RequestContext + + r, err := http.NewRequestWithContext(ctx, req_context.HTTP.Method, u.String(), body) + + if err != nil { + return nil, fmt.Errorf("Failed to create new HTTP request, %w", err) + } + + r.RemoteAddr = req_context.HTTP.SourceIP + r.RequestURI = u.RequestURI() + + r.Header = headers + return r, nil +} diff --git a/vendor/github.com/aaronland/go-http-server/mkcert.go b/vendor/github.com/aaronland/go-http-server/mkcert.go index af57259..419dc63 100644 --- a/vendor/github.com/aaronland/go-http-server/mkcert.go +++ b/vendor/github.com/aaronland/go-http-server/mkcert.go @@ -21,6 +21,14 @@ func init() { RegisterServer(ctx, "mkcert", NewMkCertServer) } +// NewMkCertServer returns a new `HTTPServer` instance configured using 'uri' +// in the form of: +// +// mkcert://?{PARAMETERS} +// +// Valid parameters are: +// - `root={PATH}` An optional path to specify where `mkcert` certificates and keys should be created. If missing +// the operating system's temporary directory will be used. func NewMkCertServer(ctx context.Context, uri string) (Server, error) { u, err := url.Parse(uri) @@ -81,6 +89,7 @@ func NewMkCertServer(ctx context.Context, uri string) (Server, error) { return NewServer(ctx, https_uri) } +// mkCert creates a new TLS certificate and key using the `mkcert` binary stored in 'root'. func mkCert(u *url.URL, root string) (string, string, error) { err := mkCertInstall() @@ -116,6 +125,7 @@ func mkCert(u *url.URL, root string) (string, string, error) { return cert_path, key_path, nil } +// mkCertInstall ensures that the `mkcert` binary is present and can be executed. func mkCertInstall() error { // unfortunately there is no way from the CLI tool to check whether diff --git a/vendor/github.com/aaronland/go-http-server/server.go b/vendor/github.com/aaronland/go-http-server/server.go index 2a5717e..78f851b 100644 --- a/vendor/github.com/aaronland/go-http-server/server.go +++ b/vendor/github.com/aaronland/go-http-server/server.go @@ -2,18 +2,24 @@ package server import ( "context" - "github.com/aaronland/go-roster" + "fmt" _ "log" "net/http" "net/url" - "strings" + "sort" + + "github.com/aaronland/go-roster" ) +// type Server is an interface for creating server instances that serve requests using a `http.Handler` router. type Server interface { + // ListenAndServe starts the server and listens for requests using a `http.Handler` instance for routing. ListenAndServe(context.Context, http.Handler) error + // Address returns the fully-qualified URI that the server is listening for requests on. Address() string } +// ServeritializeFunc is a function used to initialize an implementation of the `Server` interface. type ServerInitializeFunc func(context.Context, string) (Server, error) var servers roster.Roster @@ -34,6 +40,7 @@ func ensureServers() error { return nil } +// RegisterServer() associates 'scheme' with 'f' in an internal list of avilable `Server` implementations. func RegisterServer(ctx context.Context, scheme string, f ServerInitializeFunc) error { err := ensureServers() @@ -45,6 +52,8 @@ func RegisterServer(ctx context.Context, scheme string, f ServerInitializeFunc) return servers.Register(ctx, scheme, f) } +// NewServer() returns a new instance of `Server` for the scheme associated with 'uri'. It is assumed that this scheme +// will have previously been "registered" with the `RegisterServer` method. 
func NewServer(ctx context.Context, uri string) (Server, error) { err := ensureServers() @@ -71,12 +80,17 @@ func NewServer(ctx context.Context, uri string) (Server, error) { return f(ctx, uri) } +// Schemes() returns the list of schemes that have been "registered". func Schemes() []string { ctx := context.Background() - return servers.Drivers(ctx) -} + drivers := servers.Drivers(ctx) + + schemes := make([]string, len(drivers)) + + for idx, dr := range drivers { + schemes[idx] = fmt.Sprintf("%s://", dr) + } -func SchemesAsString() string { - schemes := Schemes() - return strings.Join(schemes, ",") + sort.Strings(schemes) + return schemes } diff --git a/vendor/github.com/aaronland/go-http-server/tls.go b/vendor/github.com/aaronland/go-http-server/tls.go deleted file mode 100644 index f5916bf..0000000 --- a/vendor/github.com/aaronland/go-http-server/tls.go +++ /dev/null @@ -1,40 +0,0 @@ -package server - -// https://github.com/FiloSottile/mkcert/issues/154 -// https://smallstep.com/blog/private-acme-server/ - -import ( - "context" - "errors" - "net/url" -) - -func init() { - ctx := context.Background() - RegisterServer(ctx, "https", NewTLSServer) - RegisterServer(ctx, "tls", NewTLSServer) -} - -func NewTLSServer(ctx context.Context, uri string) (Server, error) { - - u, err := url.Parse(uri) - - if err != nil { - return nil, err - } - - q := u.Query() - - tls_cert := q.Get("cert") - tls_key := q.Get("key") - - if tls_cert == "" { - return nil, errors.New("Missing TLS cert parameter") - } - - if tls_key == "" { - return nil, errors.New("Missing TLS key parameter") - } - - return NewHTTPServer(ctx, uri) -} diff --git a/vendor/github.com/aaronland/go-mimetypes/go.mod b/vendor/github.com/aaronland/go-mimetypes/go.mod deleted file mode 100644 index 2af2537..0000000 --- a/vendor/github.com/aaronland/go-mimetypes/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/aaronland/go-mimetypes - -go 1.12 diff --git a/vendor/github.com/aaronland/go-roster/README.md b/vendor/github.com/aaronland/go-roster/README.md index 66ad731..e630b02 100644 --- a/vendor/github.com/aaronland/go-roster/README.md +++ b/vendor/github.com/aaronland/go-roster/README.md @@ -1,2 +1,129 @@ # go-roster +Go package provides interfaces and methods for defining internal lookup tables (or "rosters") for registering and instantiatinge custom interfaces with multiple implementations. + +## Documentation + +[![Go Reference](https://pkg.go.dev/badge/github.com/aaronland/go-roster.svg)](https://pkg.go.dev/github.com/aaronland/go-roster) + +## Example + +The following example is the body of the [roster_test.go](roster_test.go) file: + +``` +package roster + +import ( + "context" + "fmt" + "net/url" + "testing" +) + +// Create a toy interface that might have multiple implementations including a common +// method signature for creating instantiations of that interface. 
+ +type Example interface { + String() string +} + +type ExampleInitializationFunc func(context.Context, string) (Example, error) + +func RegisterExample(ctx context.Context, scheme string, init_func ExampleInitializationFunc) error { + return example_roster.Register(ctx, scheme, init_func) +} + +func NewExample(ctx context.Context, uri string) (Example, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, fmt.Errorf("Failed to parse URI, %w", err) + } + + scheme := u.Scheme + + i, err := example_roster.Driver(ctx, scheme) + + if err != nil { + return nil, fmt.Errorf("Failed to find registeration for %s, %w", scheme, err) + } + + init_func := i.(ExampleInitializationFunc) + return init_func(ctx, uri) +} + +// Something that implements the Example interface + +type StringExample struct { + Example + value string +} + +func NewStringExample(ctx context.Context, uri string) (Example, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, fmt.Errorf("Failed to parse URL, %w", err) + } + + s := &StringExample{ + value: u.Path, + } + + return s, nil +} + +func (e *StringExample) String() string { + return e.value +} + +// Create a global "roster" of implementations of the Example interface + +var example_roster Roster + +// Ensure that there is a valid roster (for use by the code handling the Example interface) +// and register the StringExample implementation + +func init() { + + ctx := context.Background() + + r, err := NewDefaultRoster() + + if err != nil { + panic(err) + } + + example_roster = r + + err = RegisterExample(ctx, "string", NewStringExample) + + if err != nil { + panic(err) + } +} + +func TestRoster(t *testing.T) { + + ctx := context.Background() + + e, err := NewExample(ctx, "string:///helloworld") + + if err != nil { + t.Fatalf("Failed to create new example, %v", err) + } + + v := e.String() + + if v != "/helloworld" { + t.Fatalf("Unexpected result: '%s'", v) + } +} +``` + +## Concrete examples + +* https://github.com/whosonfirst/go-reader +* https://github.com/whosonfirst/go-writer \ No newline at end of file diff --git a/vendor/github.com/aaronland/go-roster/default.go b/vendor/github.com/aaronland/go-roster/default.go index aa7e950..2e3e337 100644 --- a/vendor/github.com/aaronland/go-roster/default.go +++ b/vendor/github.com/aaronland/go-roster/default.go @@ -9,12 +9,14 @@ import ( "sync" ) +// DefaultRoster implements the the `Roster` interface mapping scheme names to arbitrary interface values. type DefaultRoster struct { Roster mu *sync.RWMutex drivers map[string]interface{} } +// NewDefaultRoster returns a new `DefaultRoster` instance. func NewDefaultRoster() (Roster, error) { mu := new(sync.RWMutex) @@ -28,6 +30,8 @@ func NewDefaultRoster() (Roster, error) { return dr, nil } +// Driver returns the value associated with the key for the normalized value of 'name' in the list of registered +// drivers available to 'dr'. func (dr *DefaultRoster) Driver(ctx context.Context, name string) (interface{}, error) { nrml_name := dr.NormalizeName(ctx, name) @@ -38,13 +42,13 @@ func (dr *DefaultRoster) Driver(ctx context.Context, name string) (interface{}, i, ok := dr.drivers[nrml_name] if !ok { - msg := fmt.Sprintf("Unknown driver: %s (%s)", name, nrml_name) - return nil, errors.New(msg) + return nil, fmt.Errorf("Unknown driver: %s (%s)", name, nrml_name) } return i, nil } +// Registers creates a new entry in the list of drivers available to 'dr' mapping the normalized version of 'name' to 'i'. 
func (dr *DefaultRoster) Register(ctx context.Context, name string, i interface{}) error { dr.mu.Lock() @@ -59,13 +63,14 @@ func (dr *DefaultRoster) Register(ctx context.Context, name string, i interface{ _, dup := dr.drivers[nrml_name] if dup { - return errors.New("Register called twice for reader " + name) + return fmt.Errorf("Register called twice for reader '%s'", name) } dr.drivers[nrml_name] = i return nil } +// UnregisterAll removes all the registers drivers from 'dr'. func (dr *DefaultRoster) UnregisterAll(ctx context.Context) error { dr.mu.Lock() defer dr.mu.Unlock() @@ -74,10 +79,12 @@ func (dr *DefaultRoster) UnregisterAll(ctx context.Context) error { return nil } +// NormalizeName returns a normalized (upper-cased) version of 'name'. func (dr *DefaultRoster) NormalizeName(ctx context.Context, name string) string { return strings.ToUpper(name) } +// Drivers returns the list of registered schemes for all the drivers available to 'dr'. func (dr *DefaultRoster) Drivers(ctx context.Context) []string { dr.mu.RLock() diff --git a/vendor/github.com/aaronland/go-roster/doc.go b/vendor/github.com/aaronland/go-roster/doc.go new file mode 100644 index 0000000..ca273f6 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/doc.go @@ -0,0 +1,4 @@ +// package roster provides interfaces and methods for defining internal lookup tables for registering +// and instantiatinge custom interfaces with multiple implementations. The expectation is that these +// (roster) interfaces will never be seen by user-facing code. +package roster diff --git a/vendor/github.com/aaronland/go-roster/go.mod b/vendor/github.com/aaronland/go-roster/go.mod deleted file mode 100644 index eda0061..0000000 --- a/vendor/github.com/aaronland/go-roster/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/aaronland/go-roster - -go 1.12 diff --git a/vendor/github.com/aaronland/go-roster/roster.go b/vendor/github.com/aaronland/go-roster/roster.go index c667e72..44b9333 100644 --- a/vendor/github.com/aaronland/go-roster/roster.go +++ b/vendor/github.com/aaronland/go-roster/roster.go @@ -4,10 +4,16 @@ import ( "context" ) +// type Roster is an interface for defining internal lookup tables (or "rosters") for registering and instantiatinge custom interfaces with multiple implementations. type Roster interface { + // Driver returns the value associated with a name or scheme from the list of drivers that have been registered. Driver(context.Context, string) (interface{}, error) + // Drivers returns the list of names or schemes for the list of drivers that have been registered. Drivers(context.Context) []string + // UnregisterAll removes all the registered drivers from an instance implementing the Roster interfave. UnregisterAll(context.Context) error + // NormalizeName returns a normalized version of a string. NormalizeName(context.Context, string) string + // Register associated a name or scheme with an arbitrary interface. Register(context.Context, string, interface{}) error } diff --git a/vendor/github.com/aaronland/go-string/dsn/dsn.go b/vendor/github.com/aaronland/go-string/dsn/dsn.go index 7b6432d..949d6b0 100644 --- a/vendor/github.com/aaronland/go-string/dsn/dsn.go +++ b/vendor/github.com/aaronland/go-string/dsn/dsn.go @@ -1,3 +1,4 @@ +// package dsn provides methods for working with data source name (DSN) strings. package dsn import ( @@ -7,8 +8,10 @@ import ( "strings" ) +// DSN represents a data source name (DSN) style string. type DSN map[string]string +// Keys returns the list of key names contained in 'dsn'. 
func (dsn DSN) Keys() []string { keys := make([]string, 0) @@ -21,6 +24,7 @@ func (dsn DSN) Keys() []string { return keys } +// String returns a string representation of 'dsn'. func (dsn DSN) String() string { pairs := make([]string, 0) @@ -34,6 +38,7 @@ func (dsn DSN) String() string { return strings.Join(pairs, " ") } +// StringToDSN parses 'str_dsn` in to a `DSN` instance. func StringToDSN(str_dsn string) (DSN, error) { str_dsn = strings.Trim(str_dsn, " ") @@ -68,6 +73,8 @@ func StringToDSN(str_dsn string) (DSN, error) { return dsn, nil } +// StringToDSNWithKeys parse 'str_dsn' in to a `DSN` instance ensuring that the instance contains +// keys matching 'keys'. func StringToDSNWithKeys(str_dsn string, keys ...string) (DSN, error) { dsn, err := StringToDSN(str_dsn) diff --git a/vendor/github.com/akrylysov/algnhsa/CHANGELOG.md b/vendor/github.com/akrylysov/algnhsa/CHANGELOG.md new file mode 100644 index 0000000..b5643fa --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/CHANGELOG.md @@ -0,0 +1,64 @@ +# Changelog + +## [1.1.0] - 2023-12-29 +### Added +- `New` function, which returns a new lambda handler for the given http.Handler (@acoover). +### Changed +- aws-lambda-go upgraded to v1.43.0 + +## [1.0.0] - 2023-03-02 +### Fixed +- API Gateway V2: Fixed response header support. +- API Gateway V2: Fixed handling request cookies. +- API Gateway V2: Fixed multi-value query parameters. +- ALB: Fixed double escaping of query parameters. + +### Changed +- `RequestTypeAPIGateway` renamed to `RequestTypeAPIGatewayV1`. +- `ProxyRequestFromContext` renamed to `APIGatewayV1RequestFromContext`. +- `APIGatewayV2HTTPRequestFromContext` renamed to `APIGatewayV2RequestFromContext`. +- `TargetGroupRequestFromContext` renamed to `ALBRequestFromContext`. +- Improved unit tests. +- Go 1.18 is the minimum supported version now. + +## [0.13.0] - 2022-01-08 +### Added +- API Gateway V2 support (@a-h). + +## [0.12.1] - 2019-09-26 +### Fixed +- Compatibility with Go versions older than 1.13. + +## [0.12.0] - 2019-09-26 +### Added +- ALB support (thanks @adimarco and @ARolek for feedback). + +## [0.11.0] - 2019-03-18 +### Added +- Go Modules support. + +## [0.10] - 2019-02-03 +### Changed +- Set RequestURI on request (@RossHammer). +- Unescape Path (@RossHammer). +- Multi-value header support implemented using `APIGatewayProxyResponse.MultiValueHeaders`. + +## [0.9] - 2018-12-10 +### Added +- Support multi-value query string parameters and headers in requests. + +## [0.8] - 2018-07-29 +### Added +- Workaround for API Gateway not supporting headers with multiple values (@mspiegel). + +## [0.7] - 2018-06-08 +### Added +- UseProxyPath option - strips the base path mapping when using a custom domain with API Gateway. + +## [0.6] - 2018-05-30 +### Changed +- Set Host header for requests (@rvdwijngaard). + +## [0.5] - 2018-02-05 +### Added +- Context support. 
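The vendored `go-http-server` changes above register a new `functionurl://` scheme (AWS Lambda Function URLs) alongside the existing `http://`, `https://` and `lambda://` schemes, all selected through the same URI-based constructor. Below is a minimal sketch of how a caller might use it, assuming the handler mux is built elsewhere and that `image/png` stands in for whichever binary tile types a deployment actually serves:

```go
package main

import (
	"context"
	"log"
	"net/http"

	server "github.com/aaronland/go-http-server"
)

func main() {

	ctx := context.Background()

	mux := http.NewServeMux()

	mux.HandleFunc("/", func(rsp http.ResponseWriter, req *http.Request) {
		rsp.Write([]byte("hello world"))
	})

	// The "binary_type" parameter marks responses with this Content-Type
	// as content to be base64-encoded in the Lambda Function URL response.
	uri := "functionurl://?binary_type=image/png"

	s, err := server.NewServer(ctx, uri)

	if err != nil {
		log.Fatalf("Failed to create server, %v", err)
	}

	err = s.ListenAndServe(ctx, mux)

	if err != nil {
		log.Fatalf("Failed to serve requests, %v", err)
	}
}
```

The same mux runs unchanged under, say, `http://localhost:8080` or `lambda://`, which is the point of the URI-configured `Server` interface.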
diff --git a/vendor/github.com/whosonfirst/algnhsa/LICENSE b/vendor/github.com/akrylysov/algnhsa/LICENSE similarity index 100% rename from vendor/github.com/whosonfirst/algnhsa/LICENSE rename to vendor/github.com/akrylysov/algnhsa/LICENSE diff --git a/vendor/github.com/akrylysov/algnhsa/README.md b/vendor/github.com/akrylysov/algnhsa/README.md new file mode 100644 index 0000000..68a4385 --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/README.md @@ -0,0 +1,164 @@ +# algnhsa [![GoDoc](https://godoc.org/github.com/akrylysov/algnhsa?status.svg)](https://godoc.org/github.com/akrylysov/algnhsa) ![Build Status](https://github.com/akrylysov/algnhsa/actions/workflows/test.yaml/badge.svg) + +algnhsa is an AWS Lambda Go `net/http` server adapter. + +algnhsa enables running Go web applications on AWS Lambda and API Gateway or ALB without changing the existing HTTP handlers: + +```go +package main + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/akrylysov/algnhsa" +) + +func addHandler(w http.ResponseWriter, r *http.Request) { + f, _ := strconv.Atoi(r.FormValue("first")) + s, _ := strconv.Atoi(r.FormValue("second")) + w.Header().Set("X-Hi", "foo") + fmt.Fprintf(w, "%d", f+s) +} + +func contextHandler(w http.ResponseWriter, r *http.Request) { + lambdaEvent, ok := algnhsa.APIGatewayV2RequestFromContext(r.Context()) + if ok { + fmt.Fprint(w, lambdaEvent.RequestContext.AccountID) + } +} + +func main() { + http.HandleFunc("/add", addHandler) + http.HandleFunc("/context", contextHandler) + algnhsa.ListenAndServe(http.DefaultServeMux, nil) +} +``` + +## Plug in a third-party web framework + +### Gin + +```go +package main + +import ( + "net/http" + + "github.com/akrylysov/algnhsa" + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + r.GET("/", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "message": "hi", + }) + }) + algnhsa.ListenAndServe(r, nil) +} +``` + +### echo + +```go +package main + +import ( + "net/http" + + "github.com/akrylysov/algnhsa" + "github.com/labstack/echo/v4" +) + +func main() { + e := echo.New() + e.GET("/", func(c echo.Context) error { + return c.String(http.StatusOK, "hi") + }) + algnhsa.ListenAndServe(e, nil) +} +``` + +### chi + +```go +package main + +import ( + "net/http" + + "github.com/akrylysov/algnhsa" + "github.com/go-chi/chi" +) + +func main() { + r := chi.NewRouter() + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + algnhsa.ListenAndServe(r, nil) +} +``` + +### Fiber + +```go +package main + +import ( + "github.com/akrylysov/algnhsa" + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/adaptor" +) + +func main() { + app := fiber.New() + app.Get("/", func(c *fiber.Ctx) error { + return c.SendString("Hello, World!") + }) + algnhsa.ListenAndServe(adaptor.FiberApp(app), nil) +} +``` + +## Deployment + +First, build your Go application for Linux and zip it: + +```bash +GOOS=linux GOARCH=amd64 go build -tags lambda.norpc -o bootstrap +zip function.zip bootstrap +``` + +When creating a new function, choose the "Provide your own bootstrap on Amazon Linux 2" runtime or "Custom runtime on Amazon Linux 2" when modifying an existing function. Make sure to use `bootstrap` as the executable name and as the handler name in AWS. + +AWS provides plenty of ways to expose a Lambda function to the internet. + +### Lambda Function URL + +This is the easier way to deploy your Lambda function as an HTTP endpoint. 
+It only requires going to the "Function URL" section of the Lambda function configuration and clicking "Configure Function URL". + +### API Gateway + +#### HTTP API + +1. Create a new HTTP API. + +2. Configure a catch-all `$default` route. + +#### REST API + +1. Create a new REST API. + +2. In the "Resources" section create a new `ANY` method to handle requests to `/` (check "Use Lambda Proxy Integration"). + +3. Add a catch-all `{proxy+}` resource to handle requests to every other path (check "Configure as proxy resource"). + +### ALB + +1. Create a new ALB and point it to your Lambda function. + +2. In the target group settings in the "Attributes" section enable "Multi value headers". diff --git a/vendor/github.com/akrylysov/algnhsa/adapter.go b/vendor/github.com/akrylysov/algnhsa/adapter.go new file mode 100644 index 0000000..3952174 --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/adapter.go @@ -0,0 +1,65 @@ +package algnhsa + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + + "github.com/aws/aws-lambda-go/lambda" +) + +// New returns a new lambda handler for the given http.Handler. +// It is up to the caller of New to run lamdba.Start(handler) with the returned handler. +func New(handler http.Handler, opts *Options) lambda.Handler { + if handler == nil { + handler = http.DefaultServeMux + } + if opts == nil { + opts = defaultOptions + } + opts.setBinaryContentTypeMap() + return lambdaHandler{httpHandler: handler, opts: opts} +} + +var defaultOptions = &Options{} + +type lambdaHandler struct { + httpHandler http.Handler + opts *Options +} + +func (handler lambdaHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) { + resp, err := handler.handleEvent(ctx, payload) + if err != nil { + return nil, err + } + if handler.opts.DebugLog { + fmt.Printf("Response: %+v", resp) + } + return json.Marshal(resp) +} + +func (handler lambdaHandler) handleEvent(ctx context.Context, payload []byte) (lambdaResponse, error) { + if handler.opts.DebugLog { + fmt.Printf("Request: %s", payload) + } + eventReq, err := newLambdaRequest(ctx, payload, handler.opts) + if err != nil { + return lambdaResponse{}, err + } + r, err := newHTTPRequest(eventReq) + if err != nil { + return lambdaResponse{}, err + } + w := httptest.NewRecorder() + handler.httpHandler.ServeHTTP(w, r) + return newLambdaResponse(w, handler.opts.binaryContentTypeMap, eventReq.requestType) +} + +// ListenAndServe starts the AWS Lambda runtime (aws-lambda-go lambda.Start) with a given handler. 
+func ListenAndServe(handler http.Handler, opts *Options) { + lambdaHandler := New(handler, opts) + lambda.StartHandler(lambdaHandler) +} diff --git a/vendor/github.com/akrylysov/algnhsa/alb.go b/vendor/github.com/akrylysov/algnhsa/alb.go new file mode 100644 index 0000000..d71abb7 --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/alb.go @@ -0,0 +1,87 @@ +package algnhsa + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strings" + + "github.com/aws/aws-lambda-go/events" +) + +/* +AWS Documentation: + +- https://docs.aws.amazon.com/lambda/latest/dg/services-alb.html +*/ + +var ( + errALBUnexpectedRequest = errors.New("expected ALBTargetGroupRequest event") + errALBExpectedMultiValueHeaders = errors.New("expected multi value headers; enable Multi value headers in target group settings") +) + +func getALBSourceIP(event events.ALBTargetGroupRequest) string { + if xff, ok := event.MultiValueHeaders["x-forwarded-for"]; ok && len(xff) > 0 { + ips := strings.SplitN(xff[0], ",", 2) + if len(ips) > 0 { + return ips[0] + } + } + return "" +} + +func newALBRequest(ctx context.Context, payload []byte, opts *Options) (lambdaRequest, error) { + var event events.ALBTargetGroupRequest + if err := json.Unmarshal(payload, &event); err != nil { + return lambdaRequest{}, err + } + if event.RequestContext.ELB.TargetGroupArn == "" { + return lambdaRequest{}, errALBUnexpectedRequest + } + if len(event.MultiValueHeaders) == 0 { + return lambdaRequest{}, errALBExpectedMultiValueHeaders + } + + for _, vals := range event.MultiValueQueryStringParameters { + for i, v := range vals { + unescaped, err := url.QueryUnescape(v) + if err != nil { + return lambdaRequest{}, err + } + vals[i] = unescaped + } + } + + req := lambdaRequest{ + HTTPMethod: event.HTTPMethod, + Path: event.Path, + MultiValueQueryStringParameters: event.MultiValueQueryStringParameters, + MultiValueHeaders: event.MultiValueHeaders, + Body: event.Body, + IsBase64Encoded: event.IsBase64Encoded, + SourceIP: getALBSourceIP(event), + Context: context.WithValue(ctx, RequestTypeALB, event), + requestType: RequestTypeALB, + } + + return req, nil +} + +func newALBResponse(r *http.Response) (lambdaResponse, error) { + resp := lambdaResponse{ + MultiValueHeaders: r.Header, + } + return resp, nil +} + +// ALBRequestFromContext extracts the ALBTargetGroupRequest event from ctx. 
+func ALBRequestFromContext(ctx context.Context) (events.ALBTargetGroupRequest, bool) { + val := ctx.Value(RequestTypeALB) + if val == nil { + return events.ALBTargetGroupRequest{}, false + } + event, ok := val.(events.ALBTargetGroupRequest) + return event, ok +} diff --git a/vendor/github.com/akrylysov/algnhsa/apigw_v1.go b/vendor/github.com/akrylysov/algnhsa/apigw_v1.go new file mode 100644 index 0000000..051836f --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/apigw_v1.go @@ -0,0 +1,68 @@ +package algnhsa + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "path" + + "github.com/aws/aws-lambda-go/events" +) + +/* +AWS Documentation: + +- https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html +*/ + +var ( + errAPIGatewayV1UnexpectedRequest = errors.New("expected APIGatewayProxyRequest event") +) + +func newAPIGatewayV1Request(ctx context.Context, payload []byte, opts *Options) (lambdaRequest, error) { + var event events.APIGatewayProxyRequest + if err := json.Unmarshal(payload, &event); err != nil { + return lambdaRequest{}, err + } + if event.RequestContext.AccountID == "" { + return lambdaRequest{}, errAPIGatewayV1UnexpectedRequest + } + + req := lambdaRequest{ + HTTPMethod: event.HTTPMethod, + Path: event.Path, + QueryStringParameters: event.QueryStringParameters, + MultiValueQueryStringParameters: event.MultiValueQueryStringParameters, + Headers: event.Headers, + MultiValueHeaders: event.MultiValueHeaders, + Body: event.Body, + IsBase64Encoded: event.IsBase64Encoded, + SourceIP: event.RequestContext.Identity.SourceIP, + Context: context.WithValue(ctx, RequestTypeAPIGatewayV1, event), + requestType: RequestTypeAPIGatewayV1, + } + + if opts.UseProxyPath { + req.Path = path.Join("/", event.PathParameters["proxy"]) + } + + return req, nil +} + +func newAPIGatewayV1Response(r *http.Response) (lambdaResponse, error) { + resp := lambdaResponse{ + MultiValueHeaders: r.Header, + } + return resp, nil +} + +// APIGatewayV1RequestFromContext extracts the APIGatewayProxyRequest event from ctx. 
+func APIGatewayV1RequestFromContext(ctx context.Context) (events.APIGatewayProxyRequest, bool) { + val := ctx.Value(RequestTypeAPIGatewayV1) + if val == nil { + return events.APIGatewayProxyRequest{}, false + } + event, ok := val.(events.APIGatewayProxyRequest) + return event, ok +} diff --git a/vendor/github.com/akrylysov/algnhsa/apigw_v2.go b/vendor/github.com/akrylysov/algnhsa/apigw_v2.go new file mode 100644 index 0000000..2da2b6c --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/apigw_v2.go @@ -0,0 +1,90 @@ +package algnhsa + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "path" + "strings" + + "github.com/aws/aws-lambda-go/events" +) + +/* +AWS Documentation: + +- https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html +- https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html +*/ + +var ( + errAPIGatewayV2UnexpectedRequest = errors.New("expected APIGatewayV2HTTPRequest event") +) + +func newAPIGatewayV2Request(ctx context.Context, payload []byte, opts *Options) (lambdaRequest, error) { + var event events.APIGatewayV2HTTPRequest + if err := json.Unmarshal(payload, &event); err != nil { + return lambdaRequest{}, err + } + if event.Version != "2.0" { + return lambdaRequest{}, errAPIGatewayV2UnexpectedRequest + } + + req := lambdaRequest{ + HTTPMethod: event.RequestContext.HTTP.Method, + Path: event.RawPath, + RawQueryString: event.RawQueryString, + Headers: event.Headers, + Body: event.Body, + IsBase64Encoded: event.IsBase64Encoded, + SourceIP: event.RequestContext.HTTP.SourceIP, + Context: context.WithValue(ctx, RequestTypeAPIGatewayV2, event), + requestType: RequestTypeAPIGatewayV2, + } + + // APIGatewayV2 doesn't support multi-value headers. + // For cookies there is a workaround - Cookie headers are assigned to the event Cookies slice. + // All other multi-value headers are joined into a single value with a comma. + // It would be unsafe to split such values on a comma - it's impossible to distinguish a multi-value header + // joined with a comma and a single-value header that contains a comma. + if len(event.Cookies) > 0 { + if req.MultiValueHeaders == nil { + req.MultiValueHeaders = make(map[string][]string) + } + req.MultiValueHeaders["Cookie"] = event.Cookies + } + + if opts.UseProxyPath { + req.Path = path.Join("/", event.PathParameters["proxy"]) + } + + return req, nil +} + +func newAPIGatewayV2Response(r *http.Response) (lambdaResponse, error) { + resp := lambdaResponse{ + Headers: make(map[string]string, len(r.Header)), + } + // APIGatewayV2 doesn't support multi-value headers. + for key, values := range r.Header { + // For cookies there is a workaround - Set-Cookie headers are assigned to the response Cookies slice. + if key == canonicalSetCookieHeaderKey { + resp.Cookies = values + continue + } + // All other multi-value headers are joined into a single value with a comma. + resp.Headers[key] = strings.Join(values, ",") + } + return resp, nil +} + +// APIGatewayV2RequestFromContext extracts the APIGatewayV2HTTPRequest event from ctx. 
+func APIGatewayV2RequestFromContext(ctx context.Context) (events.APIGatewayV2HTTPRequest, bool) { + val := ctx.Value(RequestTypeAPIGatewayV2) + if val == nil { + return events.APIGatewayV2HTTPRequest{}, false + } + event, ok := val.(events.APIGatewayV2HTTPRequest) + return event, ok +} diff --git a/vendor/github.com/akrylysov/algnhsa/debug.go b/vendor/github.com/akrylysov/algnhsa/debug.go new file mode 100644 index 0000000..e28eb2a --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/debug.go @@ -0,0 +1,103 @@ +package algnhsa + +import ( + "encoding/json" + "github.com/aws/aws-lambda-go/events" + "io" + "mime" + "net/http" +) + +const maxDumpFormParseMem = 32 << 20 // 32MB + +// RequestDebugDump is a dump of the HTTP request including the original Lambda event. +type RequestDebugDump struct { + Method string + URL struct { + Path string + RawPath string + } + RequestURI string + Host string + RemoteAddr string + Header map[string][]string + Form map[string][]string + Body string + APIGatewayV1Request *events.APIGatewayProxyRequest `json:",omitempty"` + APIGatewayV2Request *events.APIGatewayV2HTTPRequest `json:",omitempty"` + ALBRequest *events.ALBTargetGroupRequest `json:",omitempty"` +} + +func parseMediaType(r *http.Request) (string, error) { + ct := r.Header.Get("Content-Type") + if ct == "" { + return "", nil + } + mt, _, err := mime.ParseMediaType(ct) + return mt, err +} + +// NewRequestDebugDump creates a new RequestDebugDump from an HTTP request. +func NewRequestDebugDump(r *http.Request) (*RequestDebugDump, error) { + mt, err := parseMediaType(r) + if err != nil { + return nil, err + } + if mt == "multipart/form-data" { + if err := r.ParseMultipartForm(maxDumpFormParseMem); err != nil { + return nil, err + } + } else { + if err := r.ParseForm(); err != nil { + return nil, err + } + } + + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + + dump := &RequestDebugDump{ + Method: r.Method, + URL: struct { + Path string + RawPath string + }{Path: r.URL.Path, RawPath: r.URL.RawPath}, + RequestURI: r.RequestURI, + Host: r.Host, + RemoteAddr: r.RemoteAddr, + Header: r.Header, + Form: r.Form, + Body: string(body), + } + + if event, ok := APIGatewayV1RequestFromContext(r.Context()); ok { + dump.APIGatewayV1Request = &event + } + if event, ok := APIGatewayV2RequestFromContext(r.Context()); ok { + dump.APIGatewayV2Request = &event + } + if event, ok := ALBRequestFromContext(r.Context()); ok { + dump.ALBRequest = &event + } + + return dump, nil +} + +// RequestDebugDumpHandler is an HTTP handler that returns JSON encoded RequestDebugDump. 
+func RequestDebugDumpHandler(w http.ResponseWriter, r *http.Request) { + dump, err := NewRequestDebugDump(r) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = io.WriteString(w, err.Error()) + return + } + enc := json.NewEncoder(w) + if err := enc.Encode(dump); err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = io.WriteString(w, err.Error()) + return + } + return +} diff --git a/vendor/github.com/whosonfirst/algnhsa/options.go b/vendor/github.com/akrylysov/algnhsa/options.go similarity index 62% rename from vendor/github.com/whosonfirst/algnhsa/options.go rename to vendor/github.com/akrylysov/algnhsa/options.go index 5694562..0da3ec4 100644 --- a/vendor/github.com/whosonfirst/algnhsa/options.go +++ b/vendor/github.com/akrylysov/algnhsa/options.go @@ -1,8 +1,21 @@ package algnhsa +type RequestType int + +const ( + RequestTypeAuto RequestType = iota + RequestTypeAPIGatewayV1 + RequestTypeAPIGatewayV2 + RequestTypeALB +) + // Options holds the optional parameters. type Options struct { - // BinaryContentTypes sets content types that should be treated as binary types by API Gateway. + // RequestType sets the expected request type. + // By default, algnhsa deduces the request type from the lambda function payload. + RequestType RequestType + + // BinaryContentTypes sets content types that should be treated as binary types. // The "*/* value makes algnhsa treat any content type as binary. BinaryContentTypes []string binaryContentTypeMap map[string]bool @@ -10,6 +23,9 @@ type Options struct { // Use API Gateway PathParameters["proxy"] when constructing the request url. // Strips the base path mapping when using a custom domain with API Gateway. UseProxyPath bool + + // DebugLog enables printing request and response objects to stdout. + DebugLog bool } func (opts *Options) setBinaryContentTypeMap() { diff --git a/vendor/github.com/akrylysov/algnhsa/request.go b/vendor/github.com/akrylysov/algnhsa/request.go new file mode 100644 index 0000000..89e94dc --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/request.go @@ -0,0 +1,126 @@ +package algnhsa + +import ( + "context" + "encoding/base64" + "errors" + "io" + "net/http" + "net/url" + "strings" +) + +var errUnsupportedPayloadFormat = errors.New("unsupported payload format; supported formats: APIGatewayV2HTTPRequest, APIGatewayProxyRequest, ALBTargetGroupRequest") + +type lambdaRequest struct { + HTTPMethod string + Path string + QueryStringParameters map[string]string + MultiValueQueryStringParameters map[string][]string + RawQueryString string + Headers map[string]string + MultiValueHeaders map[string][]string + IsBase64Encoded bool + Body string + SourceIP string + Context context.Context + requestType RequestType +} + +func newLambdaRequest(ctx context.Context, payload []byte, opts *Options) (lambdaRequest, error) { + switch opts.RequestType { + case RequestTypeAPIGatewayV1: + return newAPIGatewayV1Request(ctx, payload, opts) + case RequestTypeAPIGatewayV2: + return newAPIGatewayV2Request(ctx, payload, opts) + case RequestTypeALB: + return newALBRequest(ctx, payload, opts) + } + + // The request type wasn't specified. + // Try to decode the payload as APIGatewayV2HTTPRequest, fall back to APIGatewayProxyRequest, then ALBTargetGroupRequest. 
+ req, err := newAPIGatewayV2Request(ctx, payload, opts) + if err != nil && err != errAPIGatewayV2UnexpectedRequest { + return lambdaRequest{}, err + } + if err == nil { + return req, nil + } + + req, err = newAPIGatewayV1Request(ctx, payload, opts) + if err != nil && err != errAPIGatewayV1UnexpectedRequest { + return lambdaRequest{}, err + } + if err == nil { + return req, nil + } + + req, err = newALBRequest(ctx, payload, opts) + if err != nil && err != errALBUnexpectedRequest { + return lambdaRequest{}, err + } + if err == nil { + return req, nil + } + + return lambdaRequest{}, errUnsupportedPayloadFormat +} + +func newHTTPRequest(event lambdaRequest) (*http.Request, error) { + // Build request URL. + rawQuery := event.RawQueryString + if len(rawQuery) == 0 { + params := url.Values{} + for k, v := range event.QueryStringParameters { + params.Set(k, v) + } + for k, vals := range event.MultiValueQueryStringParameters { + params[k] = vals + } + rawQuery = params.Encode() + } + + // https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html + // If you specify values for both headers and multiValueHeaders, API Gateway V1 merges them into a single list. + // If the same key-value pair is specified in both, only the values from multiValueHeaders will appear + // in the merged list. + headers := make(http.Header) + for k, v := range event.Headers { + headers.Set(k, v) + } + for k, vals := range event.MultiValueHeaders { + headers[http.CanonicalHeaderKey(k)] = vals + } + + unescapedPath, err := url.PathUnescape(event.Path) + if err != nil { + return nil, err + } + u := url.URL{ + Host: headers.Get("Host"), + Path: unescapedPath, + RawQuery: rawQuery, + } + + // Handle base64 encoded body. + var body io.Reader = strings.NewReader(event.Body) + if event.IsBase64Encoded { + body = base64.NewDecoder(base64.StdEncoding, body) + } + + // Create a new request. + r, err := http.NewRequestWithContext(event.Context, event.HTTPMethod, u.String(), body) + if err != nil { + return nil, err + } + + // Set remote IP address. + r.RemoteAddr = event.SourceIP + + // Set request URI + r.RequestURI = u.RequestURI() + + r.Header = headers + + return r, nil +} diff --git a/vendor/github.com/akrylysov/algnhsa/response.go b/vendor/github.com/akrylysov/algnhsa/response.go new file mode 100644 index 0000000..e7b8500 --- /dev/null +++ b/vendor/github.com/akrylysov/algnhsa/response.go @@ -0,0 +1,53 @@ +package algnhsa + +import ( + "encoding/base64" + "net/http" + "net/http/httptest" +) + +const acceptAllContentType = "*/*" + +var canonicalSetCookieHeaderKey = http.CanonicalHeaderKey("Set-Cookie") + +// lambdaResponse is a combined lambda response. +// It contains common fields from APIGatewayProxyResponse, APIGatewayV2HTTPResponse and ALBTargetGroupResponse. 
+type lambdaResponse struct { + StatusCode int `json:"statusCode"` + Headers map[string]string `json:"headers,omitempty"` + MultiValueHeaders map[string][]string `json:"multiValueHeaders,omitempty"` + Cookies []string `json:"cookies,omitempty"` + Body string `json:"body"` + IsBase64Encoded bool `json:"isBase64Encoded,omitempty"` +} + +func newLambdaResponse(w *httptest.ResponseRecorder, binaryContentTypes map[string]bool, requestType RequestType) (lambdaResponse, error) { + result := w.Result() + + var resp lambdaResponse + var err error + switch requestType { + case RequestTypeAPIGatewayV1: + resp, err = newAPIGatewayV1Response(result) + case RequestTypeALB: + resp, err = newALBResponse(result) + case RequestTypeAPIGatewayV2: + resp, err = newAPIGatewayV2Response(result) + } + if err != nil { + return resp, err + } + + resp.StatusCode = result.StatusCode + + // Set body. + contentType := result.Header.Get("Content-Type") + if binaryContentTypes[acceptAllContentType] || binaryContentTypes[contentType] { + resp.Body = base64.StdEncoding.EncodeToString(w.Body.Bytes()) + resp.IsBase64Encoded = true + } else { + resp.Body = w.Body.String() + } + + return resp, nil +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/README.md b/vendor/github.com/aws/aws-lambda-go/events/README.md index 9081a4b..234f227 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README.md @@ -1,39 +1,69 @@ # Overview -[![GoDoc](https://godoc.org/github.com/aws/aws-lambda-go/events?status.svg)](https://godoc.org/github.com/aws/aws-lambda-go/events) +[![Go Reference](https://pkg.go.dev/badge/github.com/aws/aws-lambda-go/events.svg)](https://pkg.go.dev/github.com/aws/aws-lambda-go/events) This package provides input types for Lambda functions that process AWS events. 
# Samples +[ALB Target Group Events](README_ALBTargetGroupEvents.md) + [API Gateway](README_ApiGatewayEvent.md) [API Gateway Custom Authorizer](README_ApiGatewayCustomAuthorizer.md) [AppSync](README_AppSync.md) +[AutoScaling](README_AutoScaling.md) + +[ClientVPN Connection Handler](README_ClientVPN.md) + [CloudFormation Events](../cfn/README.md) -[Code Commit Events](README_CodeCommit.md) +[CloudWatch Events](README_CloudWatch_Events.md) + +[CloudWatch Logs](README_CloudWatch_Logs.md) + +[Chime Bot Events](README_Chime_Bots.md) + +[CodeBuild Events](README_CodeBuild.md) + +[CodeCommit Events](README_CodeCommit.md) + +[CodeDeploy Events](README_CodeDeploy.md) [Cognito Events](README_Cognito.md) +[Cognito Custom Authentication](README_Cognito_UserPools_CustomAuthLambdaTriggers.md) + [Cognito PostConfirmation](README_Cognito_UserPools_PostConfirmation.md) +[Cognito PreAuthentication](README_Cognito_UserPools_PreAuthentication.md) + [Cognito PreSignup](README_Cognito_UserPools_PreSignup.md) [Cognito PreTokenGen](README_Cognito_UserPools_PreTokenGen.md) [Config Events](README_Config.md) +[Connect Events](README_Connect.md) + [DynamoDB Events](README_DynamoDB.md) [Kinesis Events](README_Kinesis.md) +[Kinesis Data Analytics Events](README_KinesisDataAnalytics.md) + [Kinesis Firehose Events](README_KinesisFirehose.md) +[Lambda Events](README_Lambda.md) + +[Lex Events](README_Lex.md) + [S3 Events](README_S3.md) +[S3 Batch Job Events](README_S3_Batch_Job.md) + [SES Events](README_SES.md) [SNS Events](README_SNS.md) diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_ALBTargetGroupEvents.md b/vendor/github.com/aws/aws-lambda-go/events/README_ALBTargetGroupEvents.md index cf370d0..e0a4821 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_ALBTargetGroupEvents.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_ALBTargetGroupEvents.md @@ -6,7 +6,7 @@ https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-funct # Sample Function -The following is a sample class and Lambda function that receives an ALB Target Group event as an input, writes some of the incoming data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives an ALB Target Group event as an input, writes some of the incoming data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) 
```go @@ -29,7 +29,7 @@ func handleRequest(ctx context.Context, request events.ALBTargetGroupRequest) (e fmt.Printf(" %s: %s\n", key, value) } - return events.ALBTargetGroupResponse{Body: request.Body, StatusCode: 200, StatusDescription: "200 OK", IsBase64Encoded: false}, nil + return events.ALBTargetGroupResponse{Body: request.Body, StatusCode: 200, StatusDescription: "200 OK", IsBase64Encoded: false, Headers: map[string]string{}}, nil } func main() { diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_ApiGatewayEvent.md b/vendor/github.com/aws/aws-lambda-go/events/README_ApiGatewayEvent.md index 65ceabc..1faaf59 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_ApiGatewayEvent.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_ApiGatewayEvent.md @@ -4,7 +4,7 @@ API Gateway events consist of a request that was routed to a Lambda function by # Sample Function -The following is a sample class and Lambda function that receives Amazon API Gateway event record data as an input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon API Gateway event record data as an input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_AppSync.md b/vendor/github.com/aws/aws-lambda-go/events/README_AppSync.md deleted file mode 100644 index c4a0e04..0000000 --- a/vendor/github.com/aws/aws-lambda-go/events/README_AppSync.md +++ /dev/null @@ -1,30 +0,0 @@ - -# Sample Function - -The following is a sample class and Lambda function that receives AWS AppSync event message data as input, writes some of the message data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) - -```go -package main - -import ( - "context" - "fmt" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-lambda-go/lambda" -) - -func handler(ctx context.Context, event events.AppSyncResolverTemplate) error { - - fmt.Printf("Version: %s\n", event.Version) - fmt.Printf("Operation: %s\n", event.Operation) - fmt.Printf("Payload: %s\n", string(event.Payload)) - - return nil -} - -func main() { - lambda.Start(handler) -} - -``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_AutoScaling.md b/vendor/github.com/aws/aws-lambda-go/events/README_AutoScaling.md index dfc648b..2835ada 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_AutoScaling.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_AutoScaling.md @@ -1,6 +1,6 @@ # Sample Function -The following is a sample Lambda function that receives an Auto Scaling event as an input and logs the EC2 instance ID to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample Lambda function that receives an Auto Scaling event as an input and logs the EC2 instance ID to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) 
 ```go
 import (
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Chime_Bots.md b/vendor/github.com/aws/aws-lambda-go/events/README_Chime_Bots.md
new file mode 100644
index 0000000..d5d933d
--- /dev/null
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_Chime_Bots.md
@@ -0,0 +1,67 @@
+# Sample Function
+
+The following is a sample class and Lambda function that receives an Amazon Chime Bot event and handles the various event types accordingly.
+
+```go
+
+package main
+
+import (
+	"fmt"
+	"context"
+	"net/http"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"strconv"
+
+	"github.com/aws/aws-lambda-go/events"
+)
+
+func handler(_ context.Context, chimeBotEvent events.ChimeBotEvent) error {
+	switch chimeBotEvent.EventType {
+	case "Invite":
+		if err := message(chimeBotEvent.InboundHTTPSEndpoint.URL, "Thanks for inviting me to this room " + chimeBotEvent.Sender.SenderID); err != nil {
+			return fmt.Errorf("failed to send webhook message: %v", err)
+		}
+		return nil
+	case "Mention":
+		if err := message(chimeBotEvent.InboundHTTPSEndpoint.URL, "Thanks for mentioning me " + chimeBotEvent.Sender.SenderID); err != nil {
+			return fmt.Errorf("failed to send webhook message: %v", err)
+		}
+		return nil
+	case "Remove":
+		fmt.Printf("I have been removed from %q by %q", chimeBotEvent.Discussion.DiscussionType, chimeBotEvent.Sender.SenderID)
+		return nil
+	default:
+		return fmt.Errorf("event type %q is unsupported", chimeBotEvent.EventType)
+	}
+}
+
+func message(url, content string) (error) {
+	input := &bytes.Buffer{}
+	if err := json.NewEncoder(input).Encode(webhookInput{Content:content}); err != nil {
+		return errors.New("failed to marshal request: " + err.Error())
+	}
+
+	resp, err := http.Post(url, "application/json", input)
+	if err != nil {
+		return errors.New("failed to execute post http request: " + err.Error())
+	}
+
+	if resp != nil && resp.Body != nil {
+		defer resp.Body.Close()
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return errors.New("bad response: expected status code " + strconv.Itoa(http.StatusOK) + ", got " + strconv.Itoa(resp.StatusCode))
+	}
+
+	return nil
+}
+
+type webhookInput struct {
+	Content string `json:"Content"`
+}
+
+```
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_ClientVPN.md b/vendor/github.com/aws/aws-lambda-go/events/README_ClientVPN.md
new file mode 100644
index 0000000..ab8c0e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_ClientVPN.md
@@ -0,0 +1,56 @@
+# Sample Function
+
+The following is a sample Lambda function that receives a Client VPN connection handler request as an input, validates the source IP address, and checks whether it is on the allowed list defined as a map inside the function. If the source IP matches an allowed IP address it allows access; otherwise an error message is presented to the user. Debug logs are generated to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.)
+ +```go +import ( + "fmt" + "log" + "net" + + "encoding/json" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" +) + +var ( + AllowedIPs = map[string]bool{ + "10.11.12.13": true, + } +) + +func handler(request events.ClientVPNConnectionHandlerRequest) (events.ClientVPNConnectionHandlerResponse, error) { + requestJson, _ := json.MarshalIndent(request, "", " ") + log.Printf("REQUEST: %s", requestJson) + + sourceIP := request.PublicIP + if net.ParseIP(sourceIP) == nil { + return events.ClientVPNConnectionHandlerResponse{}, fmt.Errorf("Invalid parameter PublicIP passed in request: %q", sourceIP) + } + + log.Printf("SOURCE IP: %q", sourceIP) + + if allowed, ok := AllowedIPs[sourceIP]; ok && allowed { + log.Printf("Allowing access from: %q", sourceIP) + return events.ClientVPNConnectionHandlerResponse{ + Allow: true, + ErrorMsgOnFailedPostureCompliance: "", + PostureComplianceStatuses: []string{}, + SchemaVersion: "v1", + }, nil + } + + log.Printf("Blocking access from: %q", sourceIP) + return events.ClientVPNConnectionHandlerResponse{ + Allow: false, + ErrorMsgOnFailedPostureCompliance: "You're trying to connect from an IP address that is not allowed.", + PostureComplianceStatuses: []string{"BlockedSourceIP"}, + SchemaVersion: "v1", + }, nil +} + +func main() { + lambda.Start(handler) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_CloudWatch_Logs.md b/vendor/github.com/aws/aws-lambda-go/events/README_CloudWatch_Logs.md new file mode 100644 index 0000000..75a219e --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_CloudWatch_Logs.md @@ -0,0 +1,20 @@ + +# Sample Function + +The following is a Lambda function that receives Amazon CloudWatch Logs event record data as input and writes message part to Lambda's CloudWatch Logs. Note that by default anything written to Console will be logged as CloudWatch Logs events. + +```go +import ( + "context" + "fmt" + + "github.com/aws/aws-lambda-go/events" +) + +func handler(ctx context.Context, logsEvent events.CloudwatchLogsEvent) { + data, _ := logsEvent.AWSLogs.Parse() + for _, logEvent := range data.LogEvents { + fmt.Printf("Message = %s\n", logEvent.Message) + } +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_CodeBuild.md b/vendor/github.com/aws/aws-lambda-go/events/README_CodeBuild.md new file mode 100644 index 0000000..729eaa4 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_CodeBuild.md @@ -0,0 +1,15 @@ +# Sample Function + +The following is a sample Lambda function that receives an Amazon CodeBuild event +and writes it to standard output. + +```go +import ( + "fmt" + "github.com/aws/aws-lambda-go/events" +) + +func handleRequest(evt events.CodeBuildEvent) { + fmt.Println(evt) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_CodeDeploy.md b/vendor/github.com/aws/aws-lambda-go/events/README_CodeDeploy.md new file mode 100644 index 0000000..761c0b0 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_CodeDeploy.md @@ -0,0 +1,15 @@ +# Sample Function + +The following is a sample Lambda function that receives an Amazon CodeDeploy event +and writes it to standard output. 
+ +```go +import ( + "fmt" + "github.com/aws/aws-lambda-go/events" +) + +func handleRequest(evt events.CodeDeployEvent) { + fmt.Println(evt) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito.md index a8ee586..9a7249e 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito.md @@ -1,6 +1,6 @@ # Sample Function -The following is a sample Lambda function that receives Amazon Cognito Sync event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample Lambda function that receives Amazon Cognito Sync event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_CustomAuthLambdaTriggers.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_CustomAuthLambdaTriggers.md new file mode 100644 index 0000000..737339c --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_CustomAuthLambdaTriggers.md @@ -0,0 +1,69 @@ +# Sample Function + +The following is a sample Lambda functions that are used for custom authentication with Cognito User Pools. +These Lambda triggers issue and verify their own challenges as part of a user pool [custom authentication flow](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-authentication-flow.html#amazon-cognito-user-pools-custom-authentication-flow). 
+ +Please see instructions for setting up the Cognito triggers at https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html + +Define Auth Challenge Lambda Trigger: +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" +) + +func handler(event *events.CognitoEventUserPoolsDefineAuthChallenge) (*events.CognitoEventUserPoolsDefineAuthChallenge, error) { + fmt.Printf("Define Auth Challenge: %+v\n", event) + return event, nil +} + +func main() { + lambda.Start(handler) +} +``` + +Create Auth Challenge Lambda Trigger: +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" +) + +func handler(event *events.CognitoEventUserPoolsCreateAuthChallenge) (*events.CognitoEventUserPoolsCreateAuthChallenge, error) { + fmt.Printf("Create Auth Challenge: %+v\n", event) + return event, nil +} + +func main() { + lambda.Start(handler) +} +``` + +Verify Auth Challenge Response Lambda Trigger: +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" +) + +func handler(event *events.CognitoEventUserPoolsVerifyAuthChallenge) (*events.CognitoEventUserPoolsVerifyAuthChallenge, error) { + fmt.Printf("Verify Auth Challenge: %+v\n", event) + return event, nil +} + +func main() { + lambda.Start(handler) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PostConfirmation.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PostConfirmation.md index 3471837..0efab64 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PostConfirmation.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PostConfirmation.md @@ -1,6 +1,6 @@ # Sample Function -The following is a sample Lambda function that receives Amazon Cognito User Pools post-confirmation event as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample Lambda function that receives Amazon Cognito User Pools post-confirmation event as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) Please see instructions for setting up the Cognito triggers at https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html . diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreAuthentication.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreAuthentication.md new file mode 100644 index 0000000..0e74214 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreAuthentication.md @@ -0,0 +1,25 @@ +# Sample Function + +The following is a sample Lambda function that receives Amazon Cognito User Pools pre-authentication event as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) + +Please see instructions for setting up the Cognito triggers at https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html . 
+ +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" +) + +func handler(event events.CognitoEventUserPoolsPreAuthentication) (events.CognitoEventUserPoolsPreAuthentication, error) { + fmt.Printf("PreAuthentication of user: %s\n", event.UserName) + return event, nil +} + +func main() { + lambda.Start(handler) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreSignup.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreSignup.md index 9e782fc..0d48499 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreSignup.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreSignup.md @@ -1,6 +1,6 @@ # Sample Function -The following is a sample Lambda function that receives Amazon Cognito User Pools pre-signup event as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample Lambda function that receives Amazon Cognito User Pools pre-signup event as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) Please see instructions for setting up the Cognito triggers at https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html . @@ -8,19 +8,20 @@ Please see instructions for setting up the Cognito triggers at https://docs.aws. package main import ( - "fmt" + "fmt" - "github.com/aws/aws-lambda-go/lambda" - "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" ) +// handler is the lambda handler invoked by the `lambda.Start` function call func handler(event events.CognitoEventUserPoolsPreSignup) (events.CognitoEventUserPoolsPreSignup, error) { - fmt.Printf("PreSignup of user: %s\n", event.UserName) - event.Response.AutoConfirmUser = true - return event, nil + fmt.Printf("PreSignup of user: %s\n", event.UserName) + event.Response.AutoConfirmUser = true + return event, nil } func main() { - lambda.Start(handler) + lambda.Start(handler) } ``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreTokenGen.md b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreTokenGen.md index eddfcb4..f42cba0 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreTokenGen.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Cognito_UserPools_PreTokenGen.md @@ -1,6 +1,6 @@ # Sample Function -The following is a sample Lambda function that receives Amazon Cognito User Pools pre-token-gen event as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample Lambda function that receives Amazon Cognito User Pools pre-token-gen event as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) Please see instructions for setting up the Cognito triggers at https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html . 
@@ -16,7 +16,7 @@ import (
 
 func handler(event events.CognitoEventUserPoolsPreTokenGen) (events.CognitoEventUserPoolsPreTokenGen, error) {
 	fmt.Printf("PreTokenGen of user: %s\n", event.UserName)
-	event.Response.ClaimOverrideDetails.ClaimsToSupress = []string{"family_name"}
+	event.Response.ClaimsOverrideDetails.ClaimsToSuppress = []string{"family_name"}
 	return event, nil
 }
 
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Config.md b/vendor/github.com/aws/aws-lambda-go/events/README_Config.md
index e2d217a..e8d0833 100644
--- a/vendor/github.com/aws/aws-lambda-go/events/README_Config.md
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_Config.md
@@ -1,6 +1,6 @@
 # Sample Function
 
-The following is a sample Lambda function that receives Amazon Config event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
+The following is a sample Lambda function that receives Amazon Config event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.)
 
 ```go
 
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Connect.md b/vendor/github.com/aws/aws-lambda-go/events/README_Connect.md
index e33b144..9e4c92b 100644
--- a/vendor/github.com/aws/aws-lambda-go/events/README_Connect.md
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_Connect.md
@@ -1,6 +1,6 @@
 # Sample Function
 
-The following is a sample Lambda function that receives an Amazon Connect event as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
+The following is a sample Lambda function that receives an Amazon Connect event as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.)
 
 ```go
 package main
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_ECS_ContainerInstance.md b/vendor/github.com/aws/aws-lambda-go/events/README_ECS_ContainerInstance.md
new file mode 100644
index 0000000..36d7e1b
--- /dev/null
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_ECS_ContainerInstance.md
@@ -0,0 +1,27 @@
+# Sample Function
+
+The following is a sample class and Lambda function that receives Amazon ECS Container instance state change event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.)
+
+```go
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/aws/aws-lambda-go/events"
+	"github.com/aws/aws-lambda-go/lambda"
+)
+
+func handler(ctx context.Context, ecsEvent events.ECSContainerInstanceEvent) {
+	outputJSON, _ := json.MarshalIndent(ecsEvent, "", "  ")
+	fmt.Printf("Data = %s", outputJSON)
+}
+
+func main() {
+	lambda.Start(handler)
+}
+
+```
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_EventBridge_Events.md b/vendor/github.com/aws/aws-lambda-go/events/README_EventBridge_Events.md
new file mode 100644
index 0000000..52ea90c
--- /dev/null
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_EventBridge_Events.md
@@ -0,0 +1,38 @@
+
+# Sample Function
+
+The following is a Lambda function that receives Amazon EventBridge event record data as input and writes event detail to Lambda's CloudWatch Logs. Note that by default anything written to Console will be logged as CloudWatch Logs events.
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-lambda-go/events"
+)
+
+func handler(ctx context.Context, event events.EventBridgeEvent) {
+	fmt.Printf("Detail = %s\n", event.Detail)
+}
+```
+
+## CloudWatch Scheduled Events
+
+If you have a constant JSON text in a CloudWatch Scheduled Event, it can be accessed either by explicitly defining a structure for the JSON payload you would expect:
+
+```go
+type MyRequest struct {
+	Name string `json:"name"`
+}
+
+func handler(ctx context.Context, req MyRequest) {
+}
+```
+
+or by accepting a raw JSON blob as is:
+
+```go
+func handler(ctx context.Context, b json.RawMessage) {
+	// json.RawMessage is basically []byte which can be unmarshalled
+}
+```
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Kinesis.md b/vendor/github.com/aws/aws-lambda-go/events/README_Kinesis.md
index 4dd23b2..7133d4b 100644
--- a/vendor/github.com/aws/aws-lambda-go/events/README_Kinesis.md
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_Kinesis.md
@@ -1,6 +1,6 @@
 # Sample Function
 
-The following is a sample class and Lambda function that receives Amazon Kinesis event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
+The following is a sample class and Lambda function that receives Amazon Kinesis event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.)
 
 ```go
 
diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_KinesisDataAnalytics.md b/vendor/github.com/aws/aws-lambda-go/events/README_KinesisDataAnalytics.md
new file mode 100644
index 0000000..6225294
--- /dev/null
+++ b/vendor/github.com/aws/aws-lambda-go/events/README_KinesisDataAnalytics.md
@@ -0,0 +1,45 @@
+# Sample function
+
+The following is an example for an Application Destination Lambda function that receives Amazon Kinesis Data Analytics event records as an input. To send Kinesis Data Analytics output records the Lambda function must be compliant with the [required input and return data models](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output-lambda.html), so the handler returns a list of delivery statuses for each record.
+
+The following Lambda function receives Amazon Kinesis Data Analytics event record data as an input and writes some of the record data to CloudWatch Logs. For each entry it adds an element to the response slice, marking it delivered. When the logic considers the delivery to have failed, the `events.KinesisAnalyticsOutputDeliveryFailed` value should be used for the response `Result` field.
+ + +```go +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" +) + +func handler(ctx context.Context, kinesisAnalyticsEvent events.KinesisAnalyticsOutputDeliveryEvent) (events.KinesisAnalyticsOutputDeliveryResponse, error) { + var err error + + var responses events.KinesisAnalyticsOutputDeliveryResponse + responses.Records = make([]events.KinesisAnalyticsOutputDeliveryResponseRecord, len(kinesisAnalyticsEvent.Records)) + + for i, record := range kinesisAnalyticsEvent.Records { + responses.Records[i] = events.KinesisAnalyticsOutputDeliveryResponseRecord{ + RecordID: record.RecordID, + Result: events.KinesisAnalyticsOutputDeliveryOK, + } + + dataBytes := record.Data + dataText := string(dataBytes) + + fmt.Printf("%s Data = %s \n", record.RecordID, dataText) + } + return responses, err +} + +func main() { + lambda.Start(handler) +} + +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Lambda.md b/vendor/github.com/aws/aws-lambda-go/events/README_Lambda.md new file mode 100644 index 0000000..95aa75e --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Lambda.md @@ -0,0 +1,35 @@ +# Overview + +Lambda events consist of a request that was routed to a Lambda function by the Lambda Function URLs feature. When this happens, Lambda expects the result of the function to be the response that Lambda should respond with. + +# Sample Function + +The following is a sample class and Lambda function that receives Lambda Function URLs event record data as an input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) + +```go +package main + +import ( + "context" + "fmt" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambda" +) + +func handleRequest(ctx context.Context, request events.LambdaFunctionURLRequest) (events.LambdaFunctionURLResponse, error) { + fmt.Printf("Processing request data for request %s.\n", request.RequestContext.RequestID) + fmt.Printf("Body size = %d.\n", len(request.Body)) + + fmt.Println("Headers:") + for key, value := range request.Headers { + fmt.Printf(" %s: %s\n", key, value) + } + + return events.LambdaFunctionURLResponse{Body: request.Body, StatusCode: 200}, nil +} + +func main() { + lambda.Start(handleRequest) +} +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_Lex.md b/vendor/github.com/aws/aws-lambda-go/events/README_Lex.md index abcc07a..88d04e9 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_Lex.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_Lex.md @@ -1,7 +1,7 @@ # Sample Function -The following is a sample class and Lambda function that receives Amazon Lex event data as input, writes some of the record data to CloudWatch Logs, and responds back to Lex. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon Lex event data as input, writes some of the record data to CloudWatch Logs, and responds back to Lex. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) 
```go import ( diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_S3.md b/vendor/github.com/aws/aws-lambda-go/events/README_S3.md index a8f746f..ac36ab0 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_S3.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_S3.md @@ -1,20 +1,30 @@ # Sample Function -The following is a sample class and Lambda function that receives Amazon S3 event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon S3 event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go +// main.go +package main + import ( - "fmt" - "context" - "github.com/aws/aws-lambda-go/events" + "fmt" + "context" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" ) func handler(ctx context.Context, s3Event events.S3Event) { - for _, record := range s3Event.Records { - s3 := record.S3 - fmt.Printf("[%s - %s] Bucket = %s, Key = %s \n", record.EventSource, record.EventTime, s3.Bucket.Name, s3.Object.Key) - } + for _, record := range s3Event.Records { + s3 := record.S3 + fmt.Printf("[%s - %s] Bucket = %s, Key = %s \n", record.EventSource, record.EventTime, s3.Bucket.Name, s3.Object.Key) + } +} + + +func main() { + // Make the handler available for Remote Procedure Call by AWS Lambda + lambda.Start(handler) } ``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_S3_Batch_Job.md b/vendor/github.com/aws/aws-lambda-go/events/README_S3_Batch_Job.md new file mode 100644 index 0000000..19e4549 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_S3_Batch_Job.md @@ -0,0 +1,39 @@ +# Sample Function + +The following is a sample class and Lambda function that receives Amazon S3 event record data as an input and writes some of the record data to CloudWatch Logs. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) 
+ +```go + +import ( + "fmt" + "context" + "github.com/aws/aws-lambda-go/events" +) + +func handler(ctx context.Context, e events.S3BatchJobEvent) (response events.S3BatchJobResponse, err error) { + fmt.Printf("InvocationSchemaVersion: %s\n", e.InvocationSchemaVersion) + fmt.Printf("InvocationID: %s\n", e.InvocationID) + fmt.Printf("Job.ID: %s\n", e.Job.ID) + + for _, task := range e.Tasks { + fmt.Printf("TaskID: %s\n", task.TaskID) + fmt.Printf("S3Key: %s\n", task.S3Key) + fmt.Printf("S3VersionID: %s\n", task.S3VersionID) + fmt.Printf("S3BucketARN: %s\n", task.S3BucketARN) + + } + + fmt.Printf("InvocationSchemaVersion: %s\n", response.InvocationSchemaVersion) + fmt.Printf("TreatMissingKeysAs: %s\n", response.TreatMissingKeysAs) + fmt.Printf("InvocationID: %s\n", response.InvocationID) + + for _, result := range response.Results { + fmt.Printf("TaskID: %s\n", result.TaskID) + fmt.Printf("ResultCode: %s\n", result.ResultCode) + fmt.Printf("ResultString: %s\n", result.ResultString) + } + + return +} + +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_S3_Object_Lambda.md b/vendor/github.com/aws/aws-lambda-go/events/README_S3_Object_Lambda.md new file mode 100644 index 0000000..2166cb9 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_S3_Object_Lambda.md @@ -0,0 +1,83 @@ +# Sample Function + +The following is a sample class and Lambda function that receives Amazon S3 Object Lambda event record data as an input and returns an object metadata output. + +```go + +// main.go +package main + +import ( + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "io/ioutil" + "net/http" + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +func handler(ctx context.Context, event events.S3ObjectLambdaEvent) error { + url := event.GetObjectContext.InputS3Url + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + transformedObject := TransformedObject{ + Metadata: Metadata{ + Length: len(bodyBytes), + Md5: toMd5(bodyBytes), + }, + } + jsonData, err := json.Marshal(transformedObject) + if err != nil { + return err + } + cfg, err := config.LoadDefaultConfig(context.TODO()) + if err != nil { + return err + } + svc := s3.NewFromConfig(cfg) + input := &s3.WriteGetObjectResponseInput{ + RequestRoute: &event.GetObjectContext.OutputRoute, + RequestToken: &event.GetObjectContext.OutputToken, + Body: strings.NewReader(string(jsonData)), + } + res, err := svc.WriteGetObjectResponse(ctx, input) + if err != nil { + return err + } + fmt.Printf("%v", res) + return nil +} + +func toMd5(data []byte) string { + hasher := md5.New() + hasher.Write(data) + hash := hasher.Sum(nil) + + return hex.EncodeToString(hash) +} + +type TransformedObject struct { + Metadata Metadata `json:"metadata"` +} + +type Metadata struct { + Length int `json:"length"` + Md5 string `json:"md5"` +} + +func main() { + lambda.Start(handler) +} + +``` diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_SES.md b/vendor/github.com/aws/aws-lambda-go/events/README_SES.md index ddfaed9..dd47118 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_SES.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_SES.md @@ -1,7 +1,7 @@ # Sample Function -The following is a sample class and Lambda function that receives Amazon SES event message data as input, writes 
some of the message data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon SES event message data as input, writes some of the message data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go package main diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_SNS.md b/vendor/github.com/aws/aws-lambda-go/events/README_SNS.md index f198793..924250c 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_SNS.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_SNS.md @@ -1,7 +1,7 @@ # Sample Function -The following is a sample class and Lambda function that receives Amazon SNS event record data as input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon SNS event record data as input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go import ( diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_SQS.md b/vendor/github.com/aws/aws-lambda-go/events/README_SQS.md index e5340f3..d852b16 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/README_SQS.md +++ b/vendor/github.com/aws/aws-lambda-go/events/README_SQS.md @@ -1,7 +1,7 @@ # Sample Function -The following is a sample class and Lambda function that receives Amazon SQS event message data as input, writes some of the message data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.) +The following is a sample class and Lambda function that receives Amazon SQS event message data as input, writes some of the message data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that anything written to stdout or stderr will be logged as CloudWatch Logs events.) ```go package main diff --git a/vendor/github.com/aws/aws-lambda-go/events/README_SecretsManager_SecretRotationEvent.md b/vendor/github.com/aws/aws-lambda-go/events/README_SecretsManager_SecretRotationEvent.md new file mode 100644 index 0000000..1b572cf --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/README_SecretsManager_SecretRotationEvent.md @@ -0,0 +1,38 @@ +# Sample Function + +The following is a sample Lambda function that handles a SecretsManager secret rotation event. 
+ +```go +package main + +import ( + "fmt" + "context" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/events" +) + +func handler(ctx context.Context, event SecretsManagerSecretRotationEvent) error { + fmt.Printf("rotating secret %s with token %s\n", + event.SecretID, event.ClientRequestToken) + + switch event.Step { + case "createSecret": + // create + case "setSecret": + // set + case "finishSecret": + // finish + case "testSecret": + // test + } + + return nil +} + + +func main() { + lambda.Start(handler) +} +``` \ No newline at end of file diff --git a/vendor/github.com/aws/aws-lambda-go/events/activemq.go b/vendor/github.com/aws/aws-lambda-go/events/activemq.go new file mode 100644 index 0000000..fec67c4 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/activemq.go @@ -0,0 +1,31 @@ +// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package events + +type ActiveMQEvent struct { + EventSource string `json:"eventSource"` + EventSourceARN string `json:"eventSourceArn"` + Messages []ActiveMQMessage `json:"messages"` +} + +type ActiveMQMessage struct { + MessageID string `json:"messageID"` + MessageType string `json:"messageType"` + Timestamp int64 `json:"timestamp"` + DeliveryMode int `json:"deliveryMode"` + CorrelationID string `json:"correlationID"` + ReplyTo string `json:"replyTo"` + Destination ActiveMQDestination `json:"destination"` + Redelivered bool `json:"redelivered"` + Type string `json:"type"` + Expiration int64 `json:"expiration"` + Priority int `json:"priority"` + Data string `json:"data"` + BrokerInTime int64 `json:"brokerInTime"` + BrokerOutTime int64 `json:"brokerOutTime"` + Properties map[string]string `json:"properties"` +} + +type ActiveMQDestination struct { + PhysicalName string `json:"physicalName"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/alb.go b/vendor/github.com/aws/aws-lambda-go/events/alb.go index 5c1742d..ec5ee28 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/alb.go +++ b/vendor/github.com/aws/aws-lambda-go/events/alb.go @@ -20,7 +20,7 @@ type ALBTargetGroupRequestContext struct { // ELBContext contains the information to identify the ARN invoking the lambda type ELBContext struct { - TargetGroupArn string `json:"targetGroupArn"` + TargetGroupArn string `json:"targetGroupArn"` //nolint: stylecheck } // ALBTargetGroupResponse configures the response to be returned by the ALB Lambda target group for the request @@ -29,6 +29,6 @@ type ALBTargetGroupResponse struct { StatusDescription string `json:"statusDescription"` Headers map[string]string `json:"headers"` MultiValueHeaders map[string][]string `json:"multiValueHeaders"` - Body string `json:"body"` + Body string `json:"body,omitempty"` IsBase64Encoded bool `json:"isBase64Encoded"` } diff --git a/vendor/github.com/aws/aws-lambda-go/events/apigw.go b/vendor/github.com/aws/aws-lambda-go/events/apigw.go index 520a02c..04ac73f 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/apigw.go +++ b/vendor/github.com/aws/aws-lambda-go/events/apigw.go @@ -30,15 +30,105 @@ type APIGatewayProxyResponse struct { // APIGatewayProxyRequestContext contains the information to identify the AWS account and resources invoking the // Lambda function. It also includes Cognito identity information for the caller. 
type APIGatewayProxyRequestContext struct { - AccountID string `json:"accountId"` - ResourceID string `json:"resourceId"` - Stage string `json:"stage"` - RequestID string `json:"requestId"` - Identity APIGatewayRequestIdentity `json:"identity"` - ResourcePath string `json:"resourcePath"` - Authorizer map[string]interface{} `json:"authorizer"` - HTTPMethod string `json:"httpMethod"` - APIID string `json:"apiId"` // The API Gateway rest API Id + AccountID string `json:"accountId"` + ResourceID string `json:"resourceId"` + OperationName string `json:"operationName,omitempty"` + Stage string `json:"stage"` + DomainName string `json:"domainName"` + DomainPrefix string `json:"domainPrefix"` + RequestID string `json:"requestId"` + ExtendedRequestID string `json:"extendedRequestId"` + Protocol string `json:"protocol"` + Identity APIGatewayRequestIdentity `json:"identity"` + ResourcePath string `json:"resourcePath"` + Path string `json:"path"` + Authorizer map[string]interface{} `json:"authorizer"` + HTTPMethod string `json:"httpMethod"` + RequestTime string `json:"requestTime"` + RequestTimeEpoch int64 `json:"requestTimeEpoch"` + APIID string `json:"apiId"` // The API Gateway rest API Id +} + +// APIGatewayV2HTTPRequest contains data coming from the new HTTP API Gateway +type APIGatewayV2HTTPRequest struct { + Version string `json:"version"` + RouteKey string `json:"routeKey"` + RawPath string `json:"rawPath"` + RawQueryString string `json:"rawQueryString"` + Cookies []string `json:"cookies,omitempty"` + Headers map[string]string `json:"headers"` + QueryStringParameters map[string]string `json:"queryStringParameters,omitempty"` + PathParameters map[string]string `json:"pathParameters,omitempty"` + RequestContext APIGatewayV2HTTPRequestContext `json:"requestContext"` + StageVariables map[string]string `json:"stageVariables,omitempty"` + Body string `json:"body,omitempty"` + IsBase64Encoded bool `json:"isBase64Encoded"` +} + +// APIGatewayV2HTTPRequestContext contains the information to identify the AWS account and resources invoking the Lambda function. +type APIGatewayV2HTTPRequestContext struct { + RouteKey string `json:"routeKey"` + AccountID string `json:"accountId"` + Stage string `json:"stage"` + RequestID string `json:"requestId"` + Authorizer *APIGatewayV2HTTPRequestContextAuthorizerDescription `json:"authorizer,omitempty"` + APIID string `json:"apiId"` // The API Gateway HTTP API Id + DomainName string `json:"domainName"` + DomainPrefix string `json:"domainPrefix"` + Time string `json:"time"` + TimeEpoch int64 `json:"timeEpoch"` + HTTP APIGatewayV2HTTPRequestContextHTTPDescription `json:"http"` + Authentication APIGatewayV2HTTPRequestContextAuthentication `json:"authentication,omitempty"` +} + +// APIGatewayV2HTTPRequestContextAuthorizerDescription contains authorizer information for the request context. +type APIGatewayV2HTTPRequestContextAuthorizerDescription struct { + JWT *APIGatewayV2HTTPRequestContextAuthorizerJWTDescription `json:"jwt,omitempty"` + Lambda map[string]interface{} `json:"lambda,omitempty"` + IAM *APIGatewayV2HTTPRequestContextAuthorizerIAMDescription `json:"iam,omitempty"` +} + +// APIGatewayV2HTTPRequestContextAuthorizerJWTDescription contains JWT authorizer information for the request context. +type APIGatewayV2HTTPRequestContextAuthorizerJWTDescription struct { + Claims map[string]string `json:"claims"` + Scopes []string `json:"scopes,omitempty"` +} + +// APIGatewayV2HTTPRequestContextAuthorizerIAMDescription contains IAM information for the request context. 
+type APIGatewayV2HTTPRequestContextAuthorizerIAMDescription struct { + AccessKey string `json:"accessKey"` + AccountID string `json:"accountId"` + CallerID string `json:"callerId"` + CognitoIdentity APIGatewayV2HTTPRequestContextAuthorizerCognitoIdentity `json:"cognitoIdentity,omitempty"` + PrincipalOrgID string `json:"principalOrgId"` + UserARN string `json:"userArn"` + UserID string `json:"userId"` +} + +// APIGatewayV2HTTPRequestContextAuthorizerCognitoIdentity contains Cognito identity information for the request context. +type APIGatewayV2HTTPRequestContextAuthorizerCognitoIdentity struct { + AMR []string `json:"amr"` + IdentityID string `json:"identityId"` + IdentityPoolID string `json:"identityPoolId"` +} + +// APIGatewayV2HTTPRequestContextHTTPDescription contains HTTP information for the request context. +type APIGatewayV2HTTPRequestContextHTTPDescription struct { + Method string `json:"method"` + Path string `json:"path"` + Protocol string `json:"protocol"` + SourceIP string `json:"sourceIp"` + UserAgent string `json:"userAgent"` +} + +// APIGatewayV2HTTPResponse configures the response to be returned by API Gateway V2 for the request +type APIGatewayV2HTTPResponse struct { + StatusCode int `json:"statusCode"` + Headers map[string]string `json:"headers"` + MultiValueHeaders map[string][]string `json:"multiValueHeaders"` + Body string `json:"body"` + IsBase64Encoded bool `json:"isBase64Encoded,omitempty"` + Cookies []string `json:"cookies"` } // APIGatewayRequestIdentity contains identity information for the request caller. @@ -48,11 +138,12 @@ type APIGatewayRequestIdentity struct { CognitoIdentityID string `json:"cognitoIdentityId"` Caller string `json:"caller"` APIKey string `json:"apiKey"` + APIKeyID string `json:"apiKeyId"` AccessKey string `json:"accessKey"` SourceIP string `json:"sourceIp"` CognitoAuthenticationType string `json:"cognitoAuthenticationType"` CognitoAuthenticationProvider string `json:"cognitoAuthenticationProvider"` - UserArn string `json:"userArn"` + UserArn string `json:"userArn"` //nolint: stylecheck UserAgent string `json:"userAgent"` User string `json:"user"` } @@ -61,7 +152,7 @@ type APIGatewayRequestIdentity struct { type APIGatewayWebsocketProxyRequest struct { Resource string `json:"resource"` // The resource path defined in API Gateway Path string `json:"path"` // The url path for the caller - HTTPMethod string `json:"httpMethod"` + HTTPMethod string `json:"httpMethod,omitempty"` Headers map[string]string `json:"headers"` MultiValueHeaders map[string][]string `json:"multiValueHeaders"` QueryStringParameters map[string]string `json:"queryStringParameters"` @@ -101,10 +192,90 @@ type APIGatewayWebsocketProxyRequestContext struct { Status string `json:"status"` } -// APIGatewayCustomAuthorizerRequestTypeRequestIdentity contains identity information for the request caller. +// APIGatewayCustomAuthorizerRequestTypeRequestIdentity contains identity information for the request caller including certificate information if using mTLS. type APIGatewayCustomAuthorizerRequestTypeRequestIdentity struct { - APIKey string `json:"apiKey"` - SourceIP string `json:"sourceIp"` + APIKey string `json:"apiKey"` + SourceIP string `json:"sourceIp"` + ClientCert APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCert `json:"clientCert"` +} + +// APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCert contains certificate information for the request caller if using mTLS. 
+type APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCert struct { + ClientCertPem string `json:"clientCertPem"` + IssuerDN string `json:"issuerDN"` + SerialNumber string `json:"serialNumber"` + SubjectDN string `json:"subjectDN"` + Validity APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCertValidity `json:"validity"` +} + +// APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCertValidity contains certificate validity information for the request caller if using mTLS. +type APIGatewayCustomAuthorizerRequestTypeRequestIdentityClientCertValidity struct { + NotAfter string `json:"notAfter"` + NotBefore string `json:"notBefore"` +} + +// APIGatewayV2HTTPRequestContextAuthentication contains authentication context information for the request caller including client certificate information if using mTLS. +type APIGatewayV2HTTPRequestContextAuthentication struct { + ClientCert APIGatewayV2HTTPRequestContextAuthenticationClientCert `json:"clientCert"` +} + +// APIGatewayV2HTTPRequestContextAuthenticationClientCert contains client certificate information for the request caller if using mTLS. +type APIGatewayV2HTTPRequestContextAuthenticationClientCert struct { + ClientCertPem string `json:"clientCertPem"` + IssuerDN string `json:"issuerDN"` + SerialNumber string `json:"serialNumber"` + SubjectDN string `json:"subjectDN"` + Validity APIGatewayV2HTTPRequestContextAuthenticationClientCertValidity `json:"validity"` +} + +// APIGatewayV2HTTPRequestContextAuthenticationClientCertValidity contains client certificate validity information for the request caller if using mTLS. +type APIGatewayV2HTTPRequestContextAuthenticationClientCertValidity struct { + NotAfter string `json:"notAfter"` + NotBefore string `json:"notBefore"` +} + +type APIGatewayV2CustomAuthorizerV1RequestTypeRequestContext struct { + Path string `json:"path"` + AccountID string `json:"accountId"` + ResourceID string `json:"resourceId"` + Stage string `json:"stage"` + RequestID string `json:"requestId"` + Identity APIGatewayCustomAuthorizerRequestTypeRequestIdentity `json:"identity"` + ResourcePath string `json:"resourcePath"` + HTTPMethod string `json:"httpMethod"` + APIID string `json:"apiId"` +} + +type APIGatewayV2CustomAuthorizerV1Request struct { + Version string `json:"version"` + Type string `json:"type"` + MethodArn string `json:"methodArn"` //nolint: stylecheck + IdentitySource string `json:"identitySource"` + AuthorizationToken string `json:"authorizationToken"` + Resource string `json:"resource"` + Path string `json:"path"` + HTTPMethod string `json:"httpMethod"` + Headers map[string]string `json:"headers"` + QueryStringParameters map[string]string `json:"queryStringParameters"` + PathParameters map[string]string `json:"pathParameters"` + StageVariables map[string]string `json:"stageVariables"` + RequestContext APIGatewayV2CustomAuthorizerV1RequestTypeRequestContext `json:"requestContext"` +} + +type APIGatewayV2CustomAuthorizerV2Request struct { + Version string `json:"version"` + Type string `json:"type"` + RouteArn string `json:"routeArn"` //nolint: stylecheck + IdentitySource []string `json:"identitySource"` + RouteKey string `json:"routeKey"` + RawPath string `json:"rawPath"` + RawQueryString string `json:"rawQueryString"` + Cookies []string `json:"cookies"` + Headers map[string]string `json:"headers"` + QueryStringParameters map[string]string `json:"queryStringParameters"` + RequestContext APIGatewayV2HTTPRequestContext `json:"requestContext"` + PathParameters map[string]string 
`json:"pathParameters"` + StageVariables map[string]string `json:"stageVariables"` } // APIGatewayCustomAuthorizerContext represents the expected format of an API Gateway custom authorizer response. @@ -133,13 +304,13 @@ type APIGatewayCustomAuthorizerRequestTypeRequestContext struct { type APIGatewayCustomAuthorizerRequest struct { Type string `json:"type"` AuthorizationToken string `json:"authorizationToken"` - MethodArn string `json:"methodArn"` + MethodArn string `json:"methodArn"` //nolint: stylecheck } // APIGatewayCustomAuthorizerRequestTypeRequest contains data coming in to a custom API Gateway authorizer function. type APIGatewayCustomAuthorizerRequestTypeRequest struct { Type string `json:"type"` - MethodArn string `json:"methodArn"` + MethodArn string `json:"methodArn"` //nolint: stylecheck Resource string `json:"resource"` Path string `json:"path"` HTTPMethod string `json:"httpMethod"` @@ -160,14 +331,20 @@ type APIGatewayCustomAuthorizerResponse struct { UsageIdentifierKey string `json:"usageIdentifierKey,omitempty"` } -// APIGatewayCustomAuthorizerPolicy represents an IAM policy -type APIGatewayCustomAuthorizerPolicy struct { - Version string - Statement []IAMPolicyStatement +// APIGatewayV2CustomAuthorizerSimpleResponse represents the simple format of an API Gateway V2 authorization response. +type APIGatewayV2CustomAuthorizerSimpleResponse struct { + IsAuthorized bool `json:"isAuthorized"` + Context map[string]interface{} `json:"context,omitempty"` } -type IAMPolicyStatement struct { - Action []string - Effect string - Resource []string +// APIGatewayCustomAuthorizerPolicy represents an IAM policy. +// +// Note: This type exists for backwards compatibility. +// should reference IAMPolicyDocument directly instead. +type APIGatewayCustomAuthorizerPolicy IAMPolicyDocument + +type APIGatewayV2CustomAuthorizerIAMPolicyResponse struct { + PrincipalID string `json:"principalId"` + PolicyDocument APIGatewayCustomAuthorizerPolicy `json:"policyDocument"` + Context map[string]interface{} `json:"context,omitempty"` } diff --git a/vendor/github.com/aws/aws-lambda-go/events/appsync.go b/vendor/github.com/aws/aws-lambda-go/events/appsync.go index 3ada83f..37682b9 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/appsync.go +++ b/vendor/github.com/aws/aws-lambda-go/events/appsync.go @@ -2,7 +2,7 @@ package events import "encoding/json" -// AppSyncResolverTemplate represents the requests from AppSync to Lambda +// Deprecated: AppSyncResolverTemplate does not represent resolver events sent by AppSync. Instead directly model your input schema, or use map[string]string, json.RawMessage, interface{}, etc.. type AppSyncResolverTemplate struct { Version string `json:"version"` Operation AppSyncOperation `json:"operation"` @@ -11,12 +11,14 @@ type AppSyncResolverTemplate struct { // AppSyncIAMIdentity contains information about the caller authed via IAM. 
type AppSyncIAMIdentity struct { - AccountID string `json:"accountId"` - CognitoIdentityPoolID string `json:"cognitoIdentityPoolId"` - CognitoIdentityID string `json:"cognitoIdentityId"` - SourceIP []string `json:"sourceIp"` - Username string `json:"username"` - UserARN string `json:"userArn"` + AccountID string `json:"accountId"` + CognitoIdentityAuthProvider string `json:"cognitoIdentityAuthProvider"` + CognitoIdentityAuthType string `json:"cognitoIdentityAuthType"` + CognitoIdentityPoolID string `json:"cognitoIdentityPoolId"` + CognitoIdentityID string `json:"cognitoIdentityId"` + SourceIP []string `json:"sourceIp"` + Username string `json:"username"` + UserARN string `json:"userArn"` } // AppSyncCognitoIdentity contains information about the caller authed via Cognito. @@ -29,12 +31,37 @@ type AppSyncCognitoIdentity struct { DefaultAuthStrategy string `json:"defaultAuthStrategy"` } -// AppSyncOperation specifies the operation type supported by Lambda operations +// Deprecated: not used by any event schema type AppSyncOperation string const ( - // OperationInvoke lets AWS AppSync know to call your Lambda function for every GraphQL field resolver + // Deprecated: not used by any event schema OperationInvoke AppSyncOperation = "Invoke" - // OperationBatchInvoke instructs AWS AppSync to batch requests for the current GraphQL field + // Deprecated: not used by any event schema OperationBatchInvoke AppSyncOperation = "BatchInvoke" ) + +// AppSyncLambdaAuthorizerRequest contains an authorization request from AppSync. +type AppSyncLambdaAuthorizerRequest struct { + AuthorizationToken string `json:"authorizationToken"` + RequestContext AppSyncLambdaAuthorizerRequestContext `json:"requestContext"` +} + +// AppSyncLambdaAuthorizerRequestContext contains the parameters of the AppSync invocation which triggered +// this authorization request. +type AppSyncLambdaAuthorizerRequestContext struct { + APIID string `json:"apiId"` + AccountID string `json:"accountId"` + RequestID string `json:"requestId"` + QueryString string `json:"queryString"` + OperationName string `json:"operationName"` + Variables map[string]interface{} `json:"variables"` +} + +// AppSyncLambdaAuthorizerResponse represents the expected format of an authorization response to AppSync. +type AppSyncLambdaAuthorizerResponse struct { + IsAuthorized bool `json:"isAuthorized"` + ResolverContext map[string]interface{} `json:"resolverContext,omitempty"` + DeniedFields []string `json:"deniedFields,omitempty"` + TTLOverride *int `json:"ttlOverride,omitempty"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/attributevalue.go b/vendor/github.com/aws/aws-lambda-go/events/attributevalue.go index 0e64575..f582d30 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/attributevalue.go +++ b/vendor/github.com/aws/aws-lambda-go/events/attributevalue.go @@ -18,6 +18,10 @@ type DynamoDBAttributeValue struct { dataType DynamoDBDataType } +// This struct represents DynamoDBAttributeValue which doesn't +// implement fmt.Stringer interface and safely `fmt.Sprintf`able +type dynamoDbAttributeValue DynamoDBAttributeValue //nolint: stylecheck + // Binary provides access to an attribute of type Binary. // Method panics if the attribute is not of type Binary. func (av DynamoDBAttributeValue) Binary() []byte { @@ -64,6 +68,15 @@ func (av DynamoDBAttributeValue) Number() string { return av.value.(string) } +// Int64 provides access to an attribute of type Number. +// DynamoDB sends the values as strings. 
For convenience this method +// provides conversion to int. +// Method panics if the attribute is not of type Number. +func (av DynamoDBAttributeValue) Int64() (int64, error) { + number := av.Number() + return strconv.ParseInt(number, 10, 64) +} + // Integer provides access to an attribute of type Number. // DynamoDB sends the values as strings. For convenience this method // provides conversion to int. If the value cannot be represented by @@ -71,7 +84,12 @@ func (av DynamoDBAttributeValue) Number() string { // of an int64 of the appropriate sign. // Method panics if the attribute is not of type Number. func (av DynamoDBAttributeValue) Integer() (int64, error) { - s, err := strconv.ParseFloat(av.Number(), 64) + number := av.Number() + value, err := av.Int64() + if err == nil { + return value, nil + } + s, err := strconv.ParseFloat(number, 64) return int64(s), err } @@ -98,8 +116,13 @@ func (av DynamoDBAttributeValue) NumberSet() []string { // String provides access to an attribute of type String. // Method panics if the attribute is not of type String. func (av DynamoDBAttributeValue) String() string { - av.ensureType(DataTypeString) - return av.value.(string) + if av.dataType == DataTypeString { + return av.value.(string) + } + // If dataType is not DataTypeString during fmt.Sprintf("%#v", ...) + // compiler confuses with fmt.Stringer interface and panics + // instead of printing the struct. + return fmt.Sprintf("%v", dynamoDbAttributeValue(av)) } // StringSet provides access to an attribute of type String Set. diff --git a/vendor/github.com/aws/aws-lambda-go/events/chime_bot.go b/vendor/github.com/aws/aws-lambda-go/events/chime_bot.go new file mode 100644 index 0000000..fa08f00 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/chime_bot.go @@ -0,0 +1,31 @@ +// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ +package events + +import ( + "time" +) + +type ChimeBotEvent struct { + Sender ChimeBotEventSender `json:"Sender"` + Discussion ChimeBotEventDiscussion `json:"Discussion"` + EventType string `json:"EventType"` + InboundHTTPSEndpoint *ChimeBotEventInboundHTTPSEndpoint `json:"InboundHttpsEndpoint,omitempty"` + EventTimestamp time.Time `json:"EventTimestamp"` + Message string `json:"Message,omitempty"` +} + +type ChimeBotEventSender struct { + SenderID string `json:"SenderId"` + SenderIDType string `json:"SenderIdType"` +} + +type ChimeBotEventDiscussion struct { + DiscussionID string `json:"DiscussionId"` + DiscussionType string `json:"DiscussionType"` +} + +type ChimeBotEventInboundHTTPSEndpoint struct { + EndpointType string `json:"EndpointType"` + URL string `json:"Url"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/clientvpn.go b/vendor/github.com/aws/aws-lambda-go/events/clientvpn.go new file mode 100644 index 0000000..af763d0 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/clientvpn.go @@ -0,0 +1,20 @@ +package events + +type ClientVPNConnectionHandlerRequest struct { + ConnectionID string `json:"connection-id"` + EndpointID string `json:"endpoint-id"` + CommonName string `json:"common-name"` + Username string `json:"username"` + OSPlatform string `json:"platform"` + OSPlatformVersion string `json:"platform-version"` + PublicIP string `json:"public-ip"` + ClientOpenVPNVersion string `json:"client-openvpn-version"` + SchemaVersion string `json:"schema-version"` +} + +type ClientVPNConnectionHandlerResponse struct { + Allow bool `json:"allow"` + ErrorMsgOnFailedPostureCompliance string `json:"error-msg-on-failed-posture-compliance"` + PostureComplianceStatuses []string `json:"posture-compliance-statuses"` + SchemaVersion string `json:"schema-version"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_events.go b/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_events.go index e3201fd..0b109ba 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_events.go +++ b/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_events.go @@ -5,8 +5,7 @@ import ( "time" ) -// CloudWatchEvent is the outer structure of an event sent via CloudWatch Events. -// For examples of events that come via CloudWatch Events, see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html +// CloudWatchEvent is the outer structure of an event sent via EventBridge serverless service. 
type CloudWatchEvent struct { Version string `json:"version"` ID string `json:"id"` @@ -18,3 +17,5 @@ type CloudWatchEvent struct { Resources []string `json:"resources"` Detail json.RawMessage `json:"detail"` } + +type EventBridgeEvent = CloudWatchEvent diff --git a/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_logs.go b/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_logs.go index 447cbd5..6b74b3b 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_logs.go +++ b/vendor/github.com/aws/aws-lambda-go/events/cloudwatch_logs.go @@ -29,11 +29,11 @@ func (c CloudwatchLogsRawData) Parse() (d CloudwatchLogsData, err error) { if err != nil { return } + defer zr.Close() - buf := &bytes.Buffer{} - buf.ReadFrom(zr) + dec := json.NewDecoder(zr) + err = dec.Decode(&d) - err = json.Unmarshal(buf.Bytes(), &d) return } @@ -47,7 +47,7 @@ type CloudwatchLogsData struct { LogEvents []CloudwatchLogsLogEvent `json:"logEvents"` } -// LogEvent represents a log entry from cloudwatch logs +// CloudwatchLogsLogEvent represents a log entry from cloudwatch logs type CloudwatchLogsLogEvent struct { ID string `json:"id"` Timestamp int64 `json:"timestamp"` diff --git a/vendor/github.com/aws/aws-lambda-go/events/code_commit.go b/vendor/github.com/aws/aws-lambda-go/events/code_commit.go index e8672ba..9d5e071 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/code_commit.go +++ b/vendor/github.com/aws/aws-lambda-go/events/code_commit.go @@ -43,7 +43,7 @@ func (t *CodeCommitEventTime) UnmarshalJSON(data []byte) error { return err } -// represents a CodeCommit record +// CodeCommitRecord represents a CodeCommit record type CodeCommitRecord struct { EventID string `json:"eventId"` EventVersion string `json:"eventVersion"` @@ -52,7 +52,7 @@ type CodeCommitRecord struct { EventPartNumber uint64 `json:"eventPartNumber"` CodeCommit CodeCommitCodeCommit `json:"codecommit"` EventName string `json:"eventName"` - EventTriggerConfigId string `json:"eventTriggerConfigId"` + EventTriggerConfigId string `json:"eventTriggerConfigId"` //nolint: stylecheck EventSourceARN string `json:"eventSourceARN"` UserIdentityARN string `json:"userIdentityARN"` EventSource string `json:"eventSource"` @@ -91,11 +91,12 @@ type CodeCommitReference struct { Commit string `json:"commit"` Ref string `json:"ref"` Created bool `json:"created,omitempty"` + Deleted bool `json:"deleted,omitempty"` } // String returns a string representation of this object. // Useful for testing and debugging. func (r CodeCommitReference) String() string { return fmt.Sprintf( - "{commit: %v, ref: %v, created: %v}", r.Commit, r.Ref, r.Created) + "{commit: %v, ref: %v, created: %v, deleted: %v}", r.Commit, r.Ref, r.Created, r.Deleted) } diff --git a/vendor/github.com/aws/aws-lambda-go/events/codebuild.go b/vendor/github.com/aws/aws-lambda-go/events/codebuild.go new file mode 100644 index 0000000..08cbb60 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/codebuild.go @@ -0,0 +1,220 @@ +package events + +import ( + "encoding/json" + "time" +) + +const ( + CodeBuildEventSource = "aws.codebuild" + CodeBuildStateChangeDetailType = "CodeBuild Build State Change" + CodeBuildPhaseChangeDetailType = "CodeBuild Build Phase Change" +) + +// CodeBuildPhaseStatus represents the status of code build phase (i.e. 
failed, in progress) +type CodeBuildPhaseStatus string + +const ( + CodeBuildPhaseStatusFailed CodeBuildPhaseStatus = "FAILED" + CodeBuildPhaseStatusFault CodeBuildPhaseStatus = "FAULT" + CodeBuildPhaseStatusInProgress CodeBuildPhaseStatus = "IN_PROGRESS" + CodeBuildPhaseStatusQueued CodeBuildPhaseStatus = "QUEUED" + CodeBuildPhaseStatusStopped CodeBuildPhaseStatus = "STOPPED" + CodeBuildPhaseStatusSucceeded CodeBuildPhaseStatus = "SUCCEEDED" + CodeBuildPhaseStatusTimedOut CodeBuildPhaseStatus = "TIMED_OUT" +) + +// CodeBuildPhaseType represents the type of the code build phase (i.e. submitted, install) +type CodeBuildPhaseType string + +const ( + CodeBuildPhaseTypeSubmitted CodeBuildPhaseType = "SUBMITTED" + CodeBuildPhaseTypeQueued CodeBuildPhaseType = "QUEUED" + CodeBuildPhaseTypeProvisioning CodeBuildPhaseType = "PROVISIONING" + CodeBuildPhaseTypeDownloadSource CodeBuildPhaseType = "DOWNLOAD_SOURCE" + CodeBuildPhaseTypeInstall CodeBuildPhaseType = "INSTALL" + CodeBuildPhaseTypePreBuild CodeBuildPhaseType = "PRE_BUILD" + CodeBuildPhaseTypeBuild CodeBuildPhaseType = "BUILD" + CodeBuildPhaseTypePostBuild CodeBuildPhaseType = "POST_BUILD" + CodeBuildPhaseTypeUploadArtifacts CodeBuildPhaseType = "UPLOAD_ARTIFACTS" + CodeBuildPhaseTypeFinalizing CodeBuildPhaseType = "FINALIZING" + CodeBuildPhaseTypeCompleted CodeBuildPhaseType = "COMPLETED" +) + +// CodeBuildEvent is documented at: +// https://docs.aws.amazon.com/codebuild/latest/userguide/sample-build-notifications.html#sample-build-notifications-ref +type CodeBuildEvent struct { + // AccountID is the id of the AWS account from which the event originated. + AccountID string `json:"account"` + + // Region is the AWS region from which the event originated. + Region string `json:"region"` + + // DetailType informs the schema of the Detail field. For build state-change + // events, the value will be CodeBuildStateChangeDetailType. For phase-change + // events, it will be CodeBuildPhaseChangeDetailType. + DetailType string `json:"detail-type"` + + // Source should be equal to CodeBuildEventSource. + Source string `json:"source"` + + // Version is the version of the event's schema. + Version string `json:"version"` + + // Time is the event's timestamp. + Time time.Time `json:"time"` + + // ID is the GUID of this event. + ID string `json:"id"` + + // Resources is a list of ARNs of CodeBuild builds that this event pertains to. + Resources []string `json:"resources"` + + // Detail contains information specific to a build state-change or + // build phase-change event. 
+ Detail CodeBuildEventDetail `json:"detail"` +} + +// CodeBuildEventDetail represents the all details related to the code build event +type CodeBuildEventDetail struct { + BuildStatus CodeBuildPhaseStatus `json:"build-status"` + ProjectName string `json:"project-name"` + BuildID string `json:"build-id"` + AdditionalInformation CodeBuildEventAdditionalInformation `json:"additional-information"` + CurrentPhase CodeBuildPhaseType `json:"current-phase"` + CurrentPhaseContext string `json:"current-phase-context"` + Version string `json:"version"` + + CompletedPhaseStatus CodeBuildPhaseStatus `json:"completed-phase-status"` + CompletedPhase CodeBuildPhaseType `json:"completed-phase"` + CompletedPhaseContext string `json:"completed-phase-context"` + CompletedPhaseDuration DurationSeconds `json:"completed-phase-duration-seconds"` + CompletedPhaseStart CodeBuildTime `json:"completed-phase-start"` + CompletedPhaseEnd CodeBuildTime `json:"completed-phase-end"` +} + +// CodeBuildEventAdditionalInformation represents additional information to the code build event +type CodeBuildEventAdditionalInformation struct { + Artifact CodeBuildArtifact `json:"artifact"` + + Environment CodeBuildEnvironment `json:"environment"` + + Timeout DurationMinutes `json:"timeout-in-minutes"` + + BuildComplete bool `json:"build-complete"` + + BuildNumber CodeBuildNumber `json:"build-number,omitempty"` + + Initiator string `json:"initiator"` + + BuildStartTime CodeBuildTime `json:"build-start-time"` + + Source CodeBuildSource `json:"source"` + + SourceVersion string `json:"source-version"` + + Logs CodeBuildLogs `json:"logs"` + + Phases []CodeBuildPhase `json:"phases"` +} + +// CodeBuildArtifact represents the artifact provided to build +type CodeBuildArtifact struct { + MD5Sum string `json:"md5sum"` + SHA256Sum string `json:"sha256sum"` + Location string `json:"location"` +} + +// CodeBuildEnvironment represents the environment for a build +type CodeBuildEnvironment struct { + Image string `json:"image"` + PrivilegedMode bool `json:"privileged-mode"` + ComputeType string `json:"compute-type"` + Type string `json:"type"` + EnvironmentVariables []CodeBuildEnvironmentVariable `json:"environment-variables"` +} + +// CodeBuildEnvironmentVariable encapsulate environment variables for the code build +type CodeBuildEnvironmentVariable struct { + // Name is the name of the environment variable. + Name string `json:"name"` + + // Type is PLAINTEXT or PARAMETER_STORE. + Type string `json:"type"` + + // Value is the value of the environment variable. 
+ Value string `json:"value"` +} + +// CodeBuildSource represent the code source will be build +type CodeBuildSource struct { + Location string `json:"location"` + Type string `json:"type"` +} + +// CodeBuildLogs gives the log details of a code build +type CodeBuildLogs struct { + GroupName string `json:"group-name"` + StreamName string `json:"stream-name"` + DeepLink string `json:"deep-link"` +} + +// CodeBuildPhase represents the phase of a build and its details +type CodeBuildPhase struct { + PhaseContext []interface{} `json:"phase-context,omitempty"` + + StartTime CodeBuildTime `json:"start-time"` + + EndTime CodeBuildTime `json:"end-time,omitempty"` + + Duration DurationSeconds `json:"duration-in-seconds,omitempty"` + + PhaseType CodeBuildPhaseType `json:"phase-type"` + + PhaseStatus CodeBuildPhaseStatus `json:"phase-status,omitempty"` +} + +// CodeBuildTime represents the time of the build +type CodeBuildTime time.Time + +const codeBuildTimeFormat = "Jan 2, 2006 3:04:05 PM" + +// MarshalJSON converts a given CodeBuildTime to json +func (t CodeBuildTime) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Format(codeBuildTimeFormat)) +} + +// UnmarshalJSON converts a given json to a CodeBuildTime +func (t *CodeBuildTime) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + ts, err := time.Parse(codeBuildTimeFormat, s) + if err != nil { + return err + } + + *t = CodeBuildTime(ts) + return nil +} + +// CodeBuildNumber represents the number of the build +type CodeBuildNumber int32 + +// MarshalJSON converts a given CodeBuildNumber to json +func (n CodeBuildNumber) MarshalJSON() ([]byte, error) { + return json.Marshal(float32(n)) +} + +// UnmarshalJSON converts a given json to a CodeBuildNumber +func (n *CodeBuildNumber) UnmarshalJSON(data []byte) error { + var f float32 + if err := json.Unmarshal(data, &f); err != nil { + return err + } + + *n = CodeBuildNumber(int32(f)) + return nil +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/codedeploy.go b/vendor/github.com/aws/aws-lambda-go/events/codedeploy.go new file mode 100644 index 0000000..af239bb --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/codedeploy.go @@ -0,0 +1,78 @@ +package events + +import ( + "time" +) + +const ( + CodeDeployEventSource = "aws.codedeploy" + CodeDeployDeploymentEventDetailType = "CodeDeploy Deployment State-change Notification" + CodeDeployInstanceEventDetailType = "CodeDeploy Instance State-change Notification" +) + +type CodeDeployDeploymentState string + +const ( + CodeDeployDeploymentStateFailure CodeDeployDeploymentState = "FAILURE" + CodeDeployDeploymentStateReady CodeDeployDeploymentState = "READY" + CodeDeployDeploymentStateStart CodeDeployDeploymentState = "START" + CodeDeployDeploymentStateStop CodeDeployDeploymentState = "STOP" + CodeDeployDeploymentStateSuccess CodeDeployDeploymentState = "SUCCESS" +) + +type CodeDeployEvent struct { + // AccountID is the id of the AWS account from which the event originated. + AccountID string `json:"account"` + + // Region is the AWS region from which the event originated. + Region string `json:"region"` + + // DetailType informs the schema of the Detail field. For deployment state-change + // events, the value should be equal to CodeDeployDeploymentEventDetailType. + // For instance state-change events, the value should be equal to + // CodeDeployInstanceEventDetailType. 
+ DetailType string `json:"detail-type"` + + // Source should be equal to CodeDeployEventSource. + Source string `json:"source"` + + // Version is the version of the event's schema. + Version string `json:"version"` + + // Time is the event's timestamp. + Time time.Time `json:"time"` + + // ID is the GUID of this event. + ID string `json:"id"` + + // Resources is a list of ARNs of CodeDeploy applications and deployment + // groups that this event pertains to. + Resources []string `json:"resources"` + + // Detail contains information specific to a deployment event. + Detail CodeDeployEventDetail `json:"detail"` +} + +type CodeDeployEventDetail struct { + // InstanceGroupID is the ID of the instance group. + InstanceGroupID string `json:"instanceGroupId"` + + // InstanceID is the id of the instance. This field is non-empty only if + // the DetailType of the complete event is CodeDeployInstanceEventDetailType. + InstanceID string `json:"instanceId,omitempty"` + + // Region is the AWS region that the event originated from. + Region string `json:"region"` + + // Application is the name of the CodeDeploy application. + Application string `json:"application"` + + // DeploymentID is the id of the deployment. + DeploymentID string `json:"deploymentId"` + + // State is the new state of the deployment. + State CodeDeployDeploymentState `json:"state"` + + // DeploymentGroup is the name of the deployment group. + DeploymentGroup string `json:"deploymentGroup"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/codepipeline.go b/vendor/github.com/aws/aws-lambda-go/events/codepipeline.go new file mode 100644 index 0000000..4e15de5 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/codepipeline.go @@ -0,0 +1,7 @@ +package events + +// CodePipelineJob has been incorrectly assigned as CodePipelineEvent +// - https://github.com/aws/aws-lambda-go/issues/244 +// +// This maintains backwards compatability until a v2 release +type CodePipelineEvent = CodePipelineJobEvent diff --git a/vendor/github.com/aws/aws-lambda-go/events/codepipeline_cloudwatch.go b/vendor/github.com/aws/aws-lambda-go/events/codepipeline_cloudwatch.go new file mode 100644 index 0000000..41340c4 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/codepipeline_cloudwatch.go @@ -0,0 +1,119 @@ +package events + +import ( + "time" +) + +const ( + CodePipelineEventSource = "aws.codepipeline" + CodePipelineExecutionEventDetailType = "CodePipeline Pipeline Execution State Change" + CodePipelineActionEventDetailType = "CodePipeline Action Execution State Change" + CodePipelineStageEventDetailType = "CodePipeline Stage Execution State Change" +) + +type CodePipelineStageState string + +const ( + CodePipelineStageStateStarted CodePipelineStageState = "STARTED" + CodePipelineStageStateSucceeded CodePipelineStageState = "SUCCEEDED" + CodePipelineStageStateResumed CodePipelineStageState = "RESUMED" + CodePipelineStageStateFailed CodePipelineStageState = "FAILED" + CodePipelineStageStateCanceled CodePipelineStageState = "CANCELED" +) + +type CodePipelineState string + +const ( + CodePipelineStateStarted CodePipelineState = "STARTED" + CodePipelineStateSucceeded CodePipelineState = "SUCCEEDED" + CodePipelineStateResumed CodePipelineState = "RESUMED" + CodePipelineStateFailed CodePipelineState = "FAILED" + CodePipelineStateCanceled CodePipelineState = "CANCELED" + CodePipelineStateSuperseded CodePipelineState = "SUPERSEDED" +) + +type CodePipelineActionState string + +const ( + CodePipelineActionStateStarted 
CodePipelineActionState = "STARTED" + CodePipelineActionStateSucceeded CodePipelineActionState = "SUCCEEDED" + CodePipelineActionStateFailed CodePipelineActionState = "FAILED" + CodePipelineActionStateCanceled CodePipelineActionState = "CANCELED" +) + +type CodePipelineCloudWatchEvent struct { + // Version is the version of the event's schema. + Version string `json:"version"` + + // ID is the GUID of this event. + ID string `json:"id"` + + // DetailType informs the schema of the Detail field. For deployment state-change + // events, the value should be equal to CodePipelineDeploymentEventDetailType. + // For instance state-change events, the value should be equal to + // CodePipelineInstanceEventDetailType. + DetailType string `json:"detail-type"` + + // Source should be equal to CodePipelineEventSource. + Source string `json:"source"` + + // AccountID is the id of the AWS account from which the event originated. + AccountID string `json:"account"` + + // Time is the event's timestamp. + Time time.Time `json:"time"` + + // Region is the AWS region from which the event originated. + Region string `json:"region"` + + // Resources is a list of ARNs of CodePipeline applications and deployment + // groups that this event pertains to. + Resources []string `json:"resources"` + + // Detail contains information specific to a deployment event. + Detail CodePipelineEventDetail `json:"detail"` +} + +type CodePipelineEventBridgeEvent = CodePipelineCloudWatchEvent + +type CodePipelineEventDetail struct { + Pipeline string `json:"pipeline"` + + // From live testing this is always int64 not string as documented + Version int64 `json:"version"` + + ExecutionID string `json:"execution-id"` + + Stage string `json:"stage"` + + Action string `json:"action"` + + State CodePipelineState `json:"state"` + + Region string `json:"region"` + + Type CodePipelineEventDetailType `json:"type,omitempty"` + + ExecutionResult CodePipelineEventDetailExecutionResult `json:"execution-result,omitempty"` +} + +type CodePipelineEventDetailType struct { + Owner string `json:"owner"` + + Category string `json:"category"` + + Provider string `json:"provider"` + + // From published EventBridge schema registry this is always int64 not string as documented + Version int64 `json:"version"` +} + +type CodePipelineEventDetailExecutionResult struct { + ExternalExecutionURL string `json:"external-execution-url"` + + ExternalExecutionSummary string `json:"external-execution-summary"` + + ExternalExecutionID string `json:"external-execution-id"` + + ErrorCode string `json:"error-code,omitempty"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/codepipeline_job.go b/vendor/github.com/aws/aws-lambda-go/events/codepipeline_job.go index 68f9634..dd3b823 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/codepipeline_job.go +++ b/vendor/github.com/aws/aws-lambda-go/events/codepipeline_job.go @@ -2,8 +2,8 @@ package events -// CodePipelineEvent contains data from an event sent from AWS Codepipeline -type CodePipelineEvent struct { +// CodePipelineJobEvent contains data from an event sent from AWS CodePipeline +type CodePipelineJobEvent struct { CodePipelineJob CodePipelineJob `json:"CodePipeline.job"` } diff --git a/vendor/github.com/aws/aws-lambda-go/events/cognito.go b/vendor/github.com/aws/aws-lambda-go/events/cognito.go index a518abf..f02619d 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/cognito.go +++ b/vendor/github.com/aws/aws-lambda-go/events/cognito.go @@ -2,7 +2,7 @@ package events -// CognitoEvent contains data 
from an event sent from AWS Cognito Sync +// CognitoEvent contains data from an event sent from Amazon Cognito Sync type CognitoEvent struct { DatasetName string `json:"datasetName"` DatasetRecords map[string]CognitoDatasetRecord `json:"datasetRecords"` @@ -13,14 +13,14 @@ type CognitoEvent struct { Version int `json:"version"` } -// CognitoDatasetRecord represents a record from an AWS Cognito Sync event +// CognitoDatasetRecord represents a record from an Amazon Cognito Sync event type CognitoDatasetRecord struct { NewValue string `json:"newValue"` OldValue string `json:"oldValue"` Op string `json:"op"` } -// CognitoEventUserPoolsPreSignup is sent by AWS Cognito User Pools when a user attempts to register +// CognitoEventUserPoolsPreSignup is sent by Amazon Cognito User Pools when a user attempts to register // (sign up), allowing a Lambda to perform custom validation to accept or deny the registration request type CognitoEventUserPoolsPreSignup struct { CognitoEventUserPoolsHeader @@ -28,7 +28,15 @@ type CognitoEventUserPoolsPreSignup struct { Response CognitoEventUserPoolsPreSignupResponse `json:"response"` } -// CognitoEventUserPoolsPostConfirmation is sent by AWS Cognito User Pools after a user is confirmed, +// CognitoEventUserPoolsPreAuthentication is sent by Amazon Cognito User Pools when a user submits their information +// to be authenticated, allowing you to perform custom validations to accept or deny the sign in request. +type CognitoEventUserPoolsPreAuthentication struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsPreAuthenticationRequest `json:"request"` + Response CognitoEventUserPoolsPreAuthenticationResponse `json:"response"` +} + +// CognitoEventUserPoolsPostConfirmation is sent by Amazon Cognito User Pools after a user is confirmed, // allowing the Lambda to send custom messages or add custom logic. type CognitoEventUserPoolsPostConfirmation struct { CognitoEventUserPoolsHeader @@ -36,21 +44,45 @@ type CognitoEventUserPoolsPostConfirmation struct { Response CognitoEventUserPoolsPostConfirmationResponse `json:"response"` } -// CognitoEventUserPoolsPreTokenGen is sent by AWS Cognito User Pools when a user attempts to retrieve -// credentials, allowing a Lambda to perform insert, supress or override claims +// CognitoEventUserPoolsPreTokenGen is sent by Amazon Cognito User Pools when a user attempts to retrieve +// credentials, allowing a Lambda to perform insert, suppress or override claims type CognitoEventUserPoolsPreTokenGen struct { CognitoEventUserPoolsHeader Request CognitoEventUserPoolsPreTokenGenRequest `json:"request"` Response CognitoEventUserPoolsPreTokenGenResponse `json:"response"` } +// CognitoEventUserPoolsPreTokenGenV2 is sent by Amazon Cognito User Pools when a user attempts to retrieve +// credentials, allowing a Lambda to perform insert, suppress or override claims and scopes +type CognitoEventUserPoolsPreTokenGenV2 struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsPreTokenGenV2Request `json:"request"` + Response CognitoEventUserPoolsPreTokenGenV2Response `json:"response"` +} + +// CognitoEventUserPoolsPostAuthentication is sent by Amazon Cognito User Pools after a user is authenticated, +// allowing the Lambda to add custom logic. 
+type CognitoEventUserPoolsPostAuthentication struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsPostAuthenticationRequest `json:"request"` + Response CognitoEventUserPoolsPostAuthenticationResponse `json:"response"` +} + +// CognitoEventUserPoolsMigrateUser is sent by Amazon Cognito User Pools when a user does not exist in the +// user pool at the time of sign-in with a password, or in the forgot-password flow. +type CognitoEventUserPoolsMigrateUser struct { + CognitoEventUserPoolsHeader + CognitoEventUserPoolsMigrateUserRequest `json:"request"` + CognitoEventUserPoolsMigrateUserResponse `json:"response"` +} + // CognitoEventUserPoolsCallerContext contains information about the caller type CognitoEventUserPoolsCallerContext struct { AWSSDKVersion string `json:"awsSdkVersion"` ClientID string `json:"clientId"` } -// CognitoEventUserPoolsHeader contains common data from events sent by AWS Cognito User Pools +// CognitoEventUserPoolsHeader contains common data from events sent by Amazon Cognito User Pools type CognitoEventUserPoolsHeader struct { Version string `json:"version"` TriggerSource string `json:"triggerSource"` @@ -64,6 +96,7 @@ type CognitoEventUserPoolsHeader struct { type CognitoEventUserPoolsPreSignupRequest struct { UserAttributes map[string]string `json:"userAttributes"` ValidationData map[string]string `json:"validationData"` + ClientMetadata map[string]string `json:"clientMetadata"` } // CognitoEventUserPoolsPreSignupResponse contains the response portion of a PreSignup event @@ -73,9 +106,20 @@ type CognitoEventUserPoolsPreSignupResponse struct { AutoVerifyPhone bool `json:"autoVerifyPhone"` } +// CognitoEventUserPoolsPreAuthenticationRequest contains the request portion of a PreAuthentication event +type CognitoEventUserPoolsPreAuthenticationRequest struct { + UserAttributes map[string]string `json:"userAttributes"` + ValidationData map[string]string `json:"validationData"` +} + +// CognitoEventUserPoolsPreAuthenticationResponse contains the response portion of a PreAuthentication event +type CognitoEventUserPoolsPreAuthenticationResponse struct { +} + // CognitoEventUserPoolsPostConfirmationRequest contains the request portion of a PostConfirmation event type CognitoEventUserPoolsPostConfirmationRequest struct { UserAttributes map[string]string `json:"userAttributes"` + ClientMetadata map[string]string `json:"clientMetadata"` } // CognitoEventUserPoolsPostConfirmationResponse contains the response portion of a PostConfirmation event @@ -86,23 +130,181 @@ type CognitoEventUserPoolsPostConfirmationResponse struct { type CognitoEventUserPoolsPreTokenGenRequest struct { UserAttributes map[string]string `json:"userAttributes"` GroupConfiguration GroupConfiguration `json:"groupConfiguration"` + ClientMetadata map[string]string `json:"clientMetadata"` } -// CognitoEventUserPoolsPreTokenGenResponse containst the response portion of a PreTokenGen event +// CognitoEventUserPoolsPreTokenGenV2Request contains request portion of V2 PreTokenGen event +type CognitoEventUserPoolsPreTokenGenV2Request struct { + UserAttributes map[string]string `json:"userAttributes"` + GroupConfiguration GroupConfiguration `json:"groupConfiguration"` + ClientMetadata map[string]string `json:"clientMetadata,omitempty"` + Scopes []string `json:"scopes"` +} + +// CognitoEventUserPoolsPreTokenGenResponse contains the response portion of a PreTokenGen event type CognitoEventUserPoolsPreTokenGenResponse struct { ClaimsOverrideDetails ClaimsOverrideDetails `json:"claimsOverrideDetails"` } -// 
ClaimsOverrideDetails allows lambda to add, supress or override claims in the token +// CognitoEventUserPoolsPreTokenGenV2Response contains the response portion of a V2 PreTokenGen event +type CognitoEventUserPoolsPreTokenGenV2Response struct { + ClaimsAndScopeOverrideDetails ClaimsAndScopeOverrideDetails `json:"claimsAndScopeOverrideDetails"` +} + +// CognitoEventUserPoolsPostAuthenticationRequest contains the request portion of a PostAuthentication event +type CognitoEventUserPoolsPostAuthenticationRequest struct { + NewDeviceUsed bool `json:"newDeviceUsed"` + UserAttributes map[string]string `json:"userAttributes"` + ClientMetadata map[string]string `json:"clientMetadata"` +} + +// CognitoEventUserPoolsPostAuthenticationResponse contains the response portion of a PostAuthentication event +type CognitoEventUserPoolsPostAuthenticationResponse struct { +} + +// CognitoEventUserPoolsMigrateUserRequest contains the request portion of a MigrateUser event +type CognitoEventUserPoolsMigrateUserRequest struct { + Password string `json:"password"` + ValidationData map[string]string `json:"validationData"` + ClientMetadata map[string]string `json:"clientMetadata"` +} + +// CognitoEventUserPoolsMigrateUserResponse contains the response portion of a MigrateUser event +type CognitoEventUserPoolsMigrateUserResponse struct { + UserAttributes map[string]string `json:"userAttributes"` + FinalUserStatus string `json:"finalUserStatus"` + MessageAction string `json:"messageAction"` + DesiredDeliveryMediums []string `json:"desiredDeliveryMediums"` + ForceAliasCreation bool `json:"forceAliasCreation"` +} + +// ClaimsAndScopeOverrideDetails allows lambda to add, suppress or override V2 claims and scopes in the token +type ClaimsAndScopeOverrideDetails struct { + IDTokenGeneration IDTokenGeneration `json:"idTokenGeneration"` + AccessTokenGeneration AccessTokenGeneration `json:"accessTokenGeneration"` + GroupOverrideDetails GroupConfiguration `json:"groupOverrideDetails"` +} + +// IDTokenGeneration allows lambda to modify the ID token +type IDTokenGeneration struct { + ClaimsToAddOrOverride map[string]string `json:"claimsToAddOrOverride"` + ClaimsToSuppress []string `json:"claimsToSuppress"` +} + +// AccessTokenGeneration allows lambda to modify the access token +type AccessTokenGeneration struct { + ClaimsToAddOrOverride map[string]string `json:"claimsToAddOrOverride"` + ClaimsToSuppress []string `json:"claimsToSuppress"` + ScopesToAdd []string `json:"scopesToAdd"` + ScopesToSuppress []string `json:"scopesToSuppress"` +} + +// ClaimsOverrideDetails allows lambda to add, suppress or override claims in the token type ClaimsOverrideDetails struct { GroupOverrideDetails GroupConfiguration `json:"groupOverrideDetails"` ClaimsToAddOrOverride map[string]string `json:"claimsToAddOrOverride"` ClaimsToSuppress []string `json:"claimsToSuppress"` } -// GroupConfiguration allows lambda to override groups, roles and set a perferred role +// GroupConfiguration allows lambda to override groups, roles and set a preferred role type GroupConfiguration struct { GroupsToOverride []string `json:"groupsToOverride"` IAMRolesToOverride []string `json:"iamRolesToOverride"` PreferredRole *string `json:"preferredRole"` } + +// CognitoEventUserPoolsChallengeResult represents a challenge that is presented to the user in the authentication +// process that is underway, along with the corresponding result. 
+type CognitoEventUserPoolsChallengeResult struct { + ChallengeName string `json:"challengeName"` + ChallengeResult bool `json:"challengeResult"` + ChallengeMetadata string `json:"challengeMetadata"` +} + +// CognitoEventUserPoolsDefineAuthChallengeRequest defines auth challenge request parameters +type CognitoEventUserPoolsDefineAuthChallengeRequest struct { + UserAttributes map[string]string `json:"userAttributes"` + Session []*CognitoEventUserPoolsChallengeResult `json:"session"` + ClientMetadata map[string]string `json:"clientMetadata"` + UserNotFound bool `json:"userNotFound"` +} + +// CognitoEventUserPoolsDefineAuthChallengeResponse defines auth challenge response parameters +type CognitoEventUserPoolsDefineAuthChallengeResponse struct { + ChallengeName string `json:"challengeName"` + IssueTokens bool `json:"issueTokens"` + FailAuthentication bool `json:"failAuthentication"` +} + +// CognitoEventUserPoolsDefineAuthChallenge sent by Amazon Cognito User Pools to initiate custom authentication flow +type CognitoEventUserPoolsDefineAuthChallenge struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsDefineAuthChallengeRequest `json:"request"` + Response CognitoEventUserPoolsDefineAuthChallengeResponse `json:"response"` +} + +// CognitoEventUserPoolsCreateAuthChallengeRequest defines create auth challenge request parameters +type CognitoEventUserPoolsCreateAuthChallengeRequest struct { + UserAttributes map[string]string `json:"userAttributes"` + ChallengeName string `json:"challengeName"` + Session []*CognitoEventUserPoolsChallengeResult `json:"session"` + ClientMetadata map[string]string `json:"clientMetadata"` +} + +// CognitoEventUserPoolsCreateAuthChallengeResponse defines create auth challenge response rarameters +type CognitoEventUserPoolsCreateAuthChallengeResponse struct { + PublicChallengeParameters map[string]string `json:"publicChallengeParameters"` + PrivateChallengeParameters map[string]string `json:"privateChallengeParameters"` + ChallengeMetadata string `json:"challengeMetadata"` +} + +// CognitoEventUserPoolsCreateAuthChallenge sent by Amazon Cognito User Pools to create a challenge to present to the user +type CognitoEventUserPoolsCreateAuthChallenge struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsCreateAuthChallengeRequest `json:"request"` + Response CognitoEventUserPoolsCreateAuthChallengeResponse `json:"response"` +} + +// CognitoEventUserPoolsVerifyAuthChallengeRequest defines verify auth challenge request parameters +type CognitoEventUserPoolsVerifyAuthChallengeRequest struct { + UserAttributes map[string]string `json:"userAttributes"` + PrivateChallengeParameters map[string]string `json:"privateChallengeParameters"` + ChallengeAnswer interface{} `json:"challengeAnswer"` + ClientMetadata map[string]string `json:"clientMetadata"` +} + +// CognitoEventUserPoolsVerifyAuthChallengeResponse defines verify auth challenge response parameters +type CognitoEventUserPoolsVerifyAuthChallengeResponse struct { + AnswerCorrect bool `json:"answerCorrect"` +} + +// CognitoEventUserPoolsVerifyAuthChallenge sent by Amazon Cognito User Pools to verify if the response from the end user +// for a custom Auth Challenge is valid or not +type CognitoEventUserPoolsVerifyAuthChallenge struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsVerifyAuthChallengeRequest `json:"request"` + Response CognitoEventUserPoolsVerifyAuthChallengeResponse `json:"response"` +} + +// CognitoEventUserPoolsCustomMessage is sent by Amazon Cognito User Pools 
before a verification or MFA message is sent, +// allowing a user to customize the message dynamically. +type CognitoEventUserPoolsCustomMessage struct { + CognitoEventUserPoolsHeader + Request CognitoEventUserPoolsCustomMessageRequest `json:"request"` + Response CognitoEventUserPoolsCustomMessageResponse `json:"response"` +} + +// CognitoEventUserPoolsCustomMessageRequest contains the request portion of a CustomMessage event +type CognitoEventUserPoolsCustomMessageRequest struct { + UserAttributes map[string]interface{} `json:"userAttributes"` + CodeParameter string `json:"codeParameter"` + UsernameParameter string `json:"usernameParameter"` + ClientMetadata map[string]string `json:"clientMetadata"` +} + +// CognitoEventUserPoolsCustomMessageResponse contains the response portion of a CustomMessage event +type CognitoEventUserPoolsCustomMessageResponse struct { + SMSMessage string `json:"smsMessage"` + EmailMessage string `json:"emailMessage"` + EmailSubject string `json:"emailSubject"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/config.go b/vendor/github.com/aws/aws-lambda-go/events/config.go index 951809c..816b684 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/config.go +++ b/vendor/github.com/aws/aws-lambda-go/events/config.go @@ -4,14 +4,21 @@ package events // ConfigEvent contains data from an event sent from AWS Config type ConfigEvent struct { - AccountID string `json:"accountId"` // The ID of the AWS account that owns the rule - ConfigRuleArn string `json:"configRuleArn"` // The ARN that AWS Config assigned to the rule - ConfigRuleID string `json:"configRuleId"` - ConfigRuleName string `json:"configRuleName"` // The name that you assigned to the rule that caused AWS Config to publish the event - EventLeftScope bool `json:"eventLeftScope"` // A boolean value that indicates whether the AWS resource to be evaluated has been removed from the rule's scope - ExecutionRoleArn string `json:"executionRoleArn"` - InvokingEvent string `json:"invokingEvent"` // If the event is published in response to a resource configuration change, this value contains a JSON configuration item - ResultToken string `json:"resultToken"` // A token that the function must pass to AWS Config with the PutEvaluations call - RuleParameters string `json:"ruleParameters"` // Key/value pairs that the function processes as part of its evaluation logic - Version string `json:"version"` + // The ID of the AWS account that owns the rule + AccountID string `json:"accountId"` + // The ARN that AWS Config assigned to the rule + ConfigRuleArn string `json:"configRuleArn"` //nolint:stylecheck + ConfigRuleID string `json:"configRuleId"` //nolint:stylecheck + // The name that you assigned to the rule that caused AWS Config to publish the event + ConfigRuleName string `json:"configRuleName"` + // A boolean value that indicates whether the AWS resource to be evaluated has been removed from the rule's scope + EventLeftScope bool `json:"eventLeftScope"` + ExecutionRoleArn string `json:"executionRoleArn"` //nolint:stylecheck + // If the event is published in response to a resource configuration change, this value contains a JSON configuration item + InvokingEvent string `json:"invokingEvent"` + // A token that the function must pass to AWS Config with the PutEvaluations call + ResultToken string `json:"resultToken"` + // Key/value pairs that the function processes as part of its evaluation logic + RuleParameters string `json:"ruleParameters"` + Version string `json:"version"` } diff --git 
a/vendor/github.com/aws/aws-lambda-go/events/duration.go b/vendor/github.com/aws/aws-lambda-go/events/duration.go new file mode 100644 index 0000000..7952265 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/duration.go @@ -0,0 +1,45 @@ +package events + +import ( + "encoding/json" + "math" + "time" +) + +type DurationSeconds time.Duration + +// UnmarshalJSON converts a given json to a DurationSeconds +func (duration *DurationSeconds) UnmarshalJSON(data []byte) error { + var seconds float64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + + *duration = DurationSeconds(time.Duration(seconds) * time.Second) + return nil +} + +// MarshalJSON converts a given DurationSeconds to json +func (duration DurationSeconds) MarshalJSON() ([]byte, error) { + seconds := time.Duration(duration).Seconds() + return json.Marshal(int64(math.Ceil(seconds))) +} + +type DurationMinutes time.Duration + +// UnmarshalJSON converts a given json to a DurationMinutes +func (duration *DurationMinutes) UnmarshalJSON(data []byte) error { + var minutes float64 + if err := json.Unmarshal(data, &minutes); err != nil { + return err + } + + *duration = DurationMinutes(time.Duration(minutes) * time.Minute) + return nil +} + +// MarshalJSON converts a given DurationMinutes to json +func (duration DurationMinutes) MarshalJSON() ([]byte, error) { + minutes := time.Duration(duration).Minutes() + return json.Marshal(int64(math.Ceil(minutes))) +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/dynamodb.go b/vendor/github.com/aws/aws-lambda-go/events/dynamodb.go index 09fcd34..d5eeeee 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/dynamodb.go +++ b/vendor/github.com/aws/aws-lambda-go/events/dynamodb.go @@ -8,7 +8,20 @@ type DynamoDBEvent struct { Records []DynamoDBEventRecord `json:"Records"` } -// DynamoDbEventRecord stores information about each record of a DynamoDb stream event +// DynamoDBTimeWindowEvent represents an Amazon Dynamodb event when using time windows +// ref. https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-windows +type DynamoDBTimeWindowEvent struct { + DynamoDBEvent + TimeWindowProperties +} + +// DynamoDBTimeWindowEventResponse is the outer structure to report batch item failures for DynamoDBTimeWindowEvent. +type DynamoDBTimeWindowEventResponse struct { + TimeWindowEventResponseProperties + BatchItemFailures []DynamoDBBatchItemFailure `json:"batchItemFailures"` +} + +// DynamoDBEventRecord stores information about each record of a DynamoDB stream event type DynamoDBEventRecord struct { // The region in which the GetRecords request was received. AWSRegion string `json:"awsRegion"` @@ -43,7 +56,7 @@ type DynamoDBEventRecord struct { EventVersion string `json:"eventVersion"` // The event source ARN of DynamoDB - EventSourceArn string `json:"eventSourceARN"` + EventSourceArn string `json:"eventSourceARN"` //nolint: stylecheck // Items that are deleted by the Time to Live process after expiration have // the following fields: @@ -63,7 +76,7 @@ type DynamoDBUserIdentity struct { PrincipalID string `json:"principalId"` } -// A description of a single data modification that was performed on an item +// DynamoDBStreamRecord represents a description of a single data modification that was performed on an item // in a DynamoDB table. 
type DynamoDBStreamRecord struct { diff --git a/vendor/github.com/aws/aws-lambda-go/events/ecr_image_action.go b/vendor/github.com/aws/aws-lambda-go/events/ecr_image_action.go new file mode 100644 index 0000000..3d93676 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/ecr_image_action.go @@ -0,0 +1,23 @@ +package events + +import "time" + +type ECRImageActionEvent struct { + Version string `json:"version"` + ID string `json:"id"` + DetailType string `json:"detail-type"` + Source string `json:"source"` + Account string `json:"account"` + Time time.Time `json:"time"` + Region string `json:"region"` + Resources []string `json:"resources"` + Detail ECRImageActionEventDetailType `json:"detail"` +} + +type ECRImageActionEventDetailType struct { + Result string `json:"result"` + RepositoryName string `json:"repository-name"` + ImageDigest string `json:"image-digest"` + ActionType string `json:"action-type"` + ImageTag string `json:"image-tag"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/ecr_scan.go b/vendor/github.com/aws/aws-lambda-go/events/ecr_scan.go new file mode 100644 index 0000000..d38ad8a --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/ecr_scan.go @@ -0,0 +1,30 @@ +package events + +type ECRScanEvent struct { + Version string `json:"version"` + ID string `json:"id"` + DetailType string `json:"detail-type"` + Source string `json:"source"` + Time string `json:"time"` + Region string `json:"region"` + Resources []string `json:"resources"` + Account string `json:"account"` + Detail ECRScanEventDetailType `json:"detail"` +} + +type ECRScanEventDetailType struct { + ScanStatus string `json:"scan-status"` + RepositoryName string `json:"repository-name"` + FindingSeverityCounts ECRScanEventFindingSeverityCounts `json:"finding-severity-counts"` + ImageDigest string `json:"image-digest"` + ImageTags []string `json:"image-tags"` +} + +type ECRScanEventFindingSeverityCounts struct { + Critical int64 `json:"CRITICAL"` + High int64 `json:"HIGH"` + Medium int64 `json:"MEDIUM"` + Low int64 `json:"LOW"` + Informational int64 `json:"INFORMATIONAL"` + Undefined int64 `json:"UNDEFINED"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/ecs_container_instance.go b/vendor/github.com/aws/aws-lambda-go/events/ecs_container_instance.go new file mode 100644 index 0000000..4ef48b7 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/ecs_container_instance.go @@ -0,0 +1,70 @@ +package events + +import ( + "encoding/json" + "time" +) + +type ECSContainerInstanceEvent struct { + Version string `json:"version"` + ID string `json:"id"` + DetailType string `json:"detail-type"` + Source string `json:"source"` + Account string `json:"account"` + Time time.Time `json:"time"` + Region string `json:"region"` + Resources []string `json:"resources"` + Detail ECSContainerInstanceEventDetailType `json:"detail"` +} + +type ECSContainerInstanceEventDetailType struct { + AgentConnected bool `json:"agentConnected"` + Attributes []ECSContainerInstanceEventAttribute `json:"attributes"` + ClusterARN string `json:"clusterArn"` + ContainerInstanceARN string `json:"containerInstanceArn"` + EC2InstanceID string `json:"ec2InstanceId"` + RegisteredResources []ECSContainerInstanceEventResource `json:"registeredResources"` + RemainingResources []ECSContainerInstanceEventResource `json:"remainingResources"` + Status string `json:"status"` + Version int `json:"version"` + VersionInfo ECSContainerInstanceEventVersionInfo `json:"versionInfo"` + UpdatedAt time.Time `json:"updatedAt"` +} 
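// A minimal consumer sketch for the ECSContainerInstanceEvent type introduced in this
// file, assuming the function is targeted by an EventBridge rule for ECS
// container-instance state changes. The handler name and log format are illustrative,
// not part of aws-lambda-go.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"
)

// handleContainerInstanceChange reports which instance changed state and whether its
// ECS agent is still connected, using only fields defined on the event types above.
func handleContainerInstanceChange(ctx context.Context, e events.ECSContainerInstanceEvent) error {
	log.Printf("cluster=%s instance=%s status=%s agentConnected=%t",
		e.Detail.ClusterARN, e.Detail.EC2InstanceID, e.Detail.Status, e.Detail.AgentConnected)
	return nil
}

func main() {
	lambda.Start(handleContainerInstanceChange)
}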
+ +type ECSContainerInstanceEventAttribute struct { + Name string `json:"name"` +} + +type ECSContainerInstanceEventResource struct { + Name string `json:"name"` + Type string `json:"type"` + IntegerValue int `json:"integerValue,omitempty"` + StringSetValue []*string `json:"stringSetValue,omitempty"` +} + +type ECSContainerInstanceEventVersionInfo struct { + AgentHash string `json:"agentHash"` + AgentVersion string `json:"agentVersion"` + DockerVersion string `json:"dockerVersion"` +} + +// MarshalJSON implements custom marshaling to marshal the struct into JSON format while preserving an empty string slice in `StringSetValue` field. +func (r ECSContainerInstanceEventResource) MarshalJSON() ([]byte, error) { + type Alias ECSContainerInstanceEventResource + aux := struct { + StringSetValue json.RawMessage `json:"stringSetValue,omitempty"` + Alias + }{ + Alias: (Alias)(r), + } + + if r.StringSetValue != nil { + b, err := json.Marshal(r.StringSetValue) + if err != nil { + return nil, err + } + aux.StringSetValue = b + } + + return json.Marshal(&aux) +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/epoch_time.go b/vendor/github.com/aws/aws-lambda-go/events/epoch_time.go index b0e48a0..59e4e1d 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/epoch_time.go +++ b/vendor/github.com/aws/aws-lambda-go/events/epoch_time.go @@ -7,6 +7,11 @@ import ( "time" ) +// RFC3339EpochTime serializes a time.Time in JSON as an ISO 8601 string. +type RFC3339EpochTime struct { + time.Time +} + // SecondsEpochTime serializes a time.Time in JSON as a UNIX epoch time in seconds type SecondsEpochTime struct { time.Time @@ -57,3 +62,24 @@ func (e *MilliSecondsEpochTime) UnmarshalJSON(b []byte) error { *e = MilliSecondsEpochTime{time.Unix(epoch/1000, (epoch%1000)*1000000)} return nil } + +func (e RFC3339EpochTime) MarshalJSON() ([]byte, error) { + isoTimestampStr := e.Format(time.RFC3339) + return json.Marshal(isoTimestampStr) +} + +func (e *RFC3339EpochTime) UnmarshalJSON(b []byte) error { + var isoTimestampStr string + err := json.Unmarshal(b, &isoTimestampStr) + if err != nil { + return err + } + + parsed, err := time.Parse(time.RFC3339, isoTimestampStr) + if err != nil { + return err + } + + *e = RFC3339EpochTime{parsed} + return nil +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/firehose.go b/vendor/github.com/aws/aws-lambda-go/events/firehose.go index 293974e..85b8fd1 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/firehose.go +++ b/vendor/github.com/aws/aws-lambda-go/events/firehose.go @@ -4,16 +4,18 @@ package events // KinesisFirehoseEvent represents the input event from Amazon Kinesis Firehose. It is used as the input parameter. 
type KinesisFirehoseEvent struct { - InvocationID string `json:"invocationId"` - DeliveryStreamArn string `json:"deliveryStreamArn"` - Region string `json:"region"` - Records []KinesisFirehoseEventRecord `json:"records"` + InvocationID string `json:"invocationId"` + DeliveryStreamArn string `json:"deliveryStreamArn"` //nolint: stylecheck + SourceKinesisStreamArn string `json:"sourceKinesisStreamArn"` //nolint: stylecheck + Region string `json:"region"` + Records []KinesisFirehoseEventRecord `json:"records"` } type KinesisFirehoseEventRecord struct { - RecordID string `json:"recordId"` - ApproximateArrivalTimestamp MilliSecondsEpochTime `json:"approximateArrivalTimestamp"` - Data []byte `json:"data"` + RecordID string `json:"recordId"` + ApproximateArrivalTimestamp MilliSecondsEpochTime `json:"approximateArrivalTimestamp"` + Data []byte `json:"data"` + KinesisFirehoseRecordMetadata KinesisFirehoseRecordMetadata `json:"kinesisRecordMetadata"` } // Constants used for describing the transformation result @@ -28,7 +30,20 @@ type KinesisFirehoseResponse struct { } type KinesisFirehoseResponseRecord struct { - RecordID string `json:"recordId"` - Result string `json:"result"` // The status of the transformation. May be TransformedStateOk, TransformedStateDropped or TransformedStateProcessingFailed - Data []byte `json:"data"` + RecordID string `json:"recordId"` + Result string `json:"result"` // The status of the transformation. May be TransformedStateOk, TransformedStateDropped or TransformedStateProcessingFailed + Data []byte `json:"data"` + Metadata KinesisFirehoseResponseRecordMetadata `json:"metadata"` +} + +type KinesisFirehoseResponseRecordMetadata struct { + PartitionKeys map[string]string `json:"partitionKeys"` +} + +type KinesisFirehoseRecordMetadata struct { + ShardID string `json:"shardId"` + PartitionKey string `json:"partitionKey"` + SequenceNumber string `json:"sequenceNumber"` + SubsequenceNumber int64 `json:"subsequenceNumber"` + ApproximateArrivalTimestamp MilliSecondsEpochTime `json:"approximateArrivalTimestamp"` } diff --git a/vendor/github.com/aws/aws-lambda-go/events/iam.go b/vendor/github.com/aws/aws-lambda-go/events/iam.go new file mode 100644 index 0000000..090530d --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/iam.go @@ -0,0 +1,14 @@ +package events + +// IAMPolicyDocument represents an IAM policy document. +type IAMPolicyDocument struct { + Version string + Statement []IAMPolicyStatement +} + +// IAMPolicyStatement represents one statement from IAM policy with action, effect and resource. +type IAMPolicyStatement struct { + Action []string + Effect string + Resource []string +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/iot.go b/vendor/github.com/aws/aws-lambda-go/events/iot.go new file mode 100644 index 0000000..afff702 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/iot.go @@ -0,0 +1,46 @@ +package events + +// IoTCoreCustomAuthorizerRequest represents the request to an IoT Core custom authorizer. 
+// See https://docs.aws.amazon.com/iot/latest/developerguide/config-custom-auth.html +type IoTCoreCustomAuthorizerRequest struct { + Token string `json:"token"` + SignatureVerified bool `json:"signatureVerified"` + Protocols []string `json:"protocols"` + ProtocolData *IoTCoreProtocolData `json:"protocolData,omitempty"` + ConnectionMetadata *IoTCoreConnectionMetadata `json:"connectionMetadata,omitempty"` +} + +type IoTCoreProtocolData struct { + TLS *IoTCoreTLSContext `json:"tls,omitempty"` + HTTP *IoTCoreHTTPContext `json:"http,omitempty"` + MQTT *IoTCoreMQTTContext `json:"mqtt,omitempty"` +} + +type IoTCoreTLSContext struct { + ServerName string `json:"serverName"` +} + +type IoTCoreHTTPContext struct { + Headers map[string]string `json:"headers,omitempty"` + QueryString string `json:"queryString"` +} + +type IoTCoreMQTTContext struct { + ClientID string `json:"clientId"` + Password []byte `json:"password"` + Username string `json:"username"` +} + +type IoTCoreConnectionMetadata struct { + ID string `json:"id"` +} + +// IoTCoreCustomAuthorizerResponse represents the response from an IoT Core custom authorizer. +// See https://docs.aws.amazon.com/iot/latest/developerguide/config-custom-auth.html +type IoTCoreCustomAuthorizerResponse struct { + IsAuthenticated bool `json:"isAuthenticated"` + PrincipalID string `json:"principalId"` + DisconnectAfterInSeconds uint32 `json:"disconnectAfterInSeconds"` + RefreshAfterInSeconds uint32 `json:"refreshAfterInSeconds"` + PolicyDocuments []*IAMPolicyDocument `json:"policyDocuments"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/iot_1_click.go b/vendor/github.com/aws/aws-lambda-go/events/iot_1_click.go new file mode 100644 index 0000000..b2e0d84 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/iot_1_click.go @@ -0,0 +1,34 @@ +// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package events + +// IoTOneClickEvent represents a click event published by clicking button type +// device. +type IoTOneClickEvent struct { + DeviceEvent IoTOneClickDeviceEvent `json:"deviceEvent"` + DeviceInfo IoTOneClickDeviceInfo `json:"deviceInfo"` + PlacementInfo IoTOneClickPlacementInfo `json:"placementInfo"` +} + +type IoTOneClickDeviceEvent struct { + ButtonClicked IoTOneClickButtonClicked `json:"buttonClicked"` +} + +type IoTOneClickButtonClicked struct { + ClickType string `json:"clickType"` + ReportedTime string `json:"reportedTime"` +} + +type IoTOneClickDeviceInfo struct { + Attributes map[string]string `json:"attributes"` + Type string `json:"type"` + DeviceID string `json:"deviceId"` + RemainingLife float64 `json:"remainingLife"` +} + +type IoTOneClickPlacementInfo struct { + ProjectName string `json:"projectName"` + PlacementName string `json:"placementName"` + Attributes map[string]string `json:"attributes"` + Devices map[string]string `json:"devices"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/iot_deprecated.go b/vendor/github.com/aws/aws-lambda-go/events/iot_deprecated.go new file mode 100644 index 0000000..9330c6b --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/iot_deprecated.go @@ -0,0 +1,30 @@ +package events + +// IoTCustomAuthorizerRequest contains data coming in to a custom IoT device gateway authorizer function. +// Deprecated: Use IoTCoreCustomAuthorizerRequest instead. 
IoTCustomAuthorizerRequest does not correctly model the request schema +type IoTCustomAuthorizerRequest struct { + HTTPContext *IoTHTTPContext `json:"httpContext,omitempty"` + MQTTContext *IoTMQTTContext `json:"mqttContext,omitempty"` + TLSContext *IoTTLSContext `json:"tlsContext,omitempty"` + AuthorizationToken string `json:"token"` + TokenSignature string `json:"tokenSignature"` +} + +// Deprecated: Use IoTCoreHTTPContext +type IoTHTTPContext IoTCoreHTTPContext + +// Deprecated: Use IoTCoreMQTTContext +type IoTMQTTContext IoTCoreMQTTContext + +// Deprecated: Use IotCoreTLSContext +type IoTTLSContext IoTCoreTLSContext + +// IoTCustomAuthorizerResponse represents the expected format of an IoT device gateway authorization response. +// Deprecated: Use IoTCoreCustomAuthorizerResponse. IoTCustomAuthorizerResponse does not correctly model the response schema. +type IoTCustomAuthorizerResponse struct { + IsAuthenticated bool `json:"isAuthenticated"` + PrincipalID string `json:"principalId"` + DisconnectAfterInSeconds int32 `json:"disconnectAfterInSeconds"` + RefreshAfterInSeconds int32 `json:"refreshAfterInSeconds"` + PolicyDocuments []string `json:"policyDocuments"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/iot_preprovision_hook.go b/vendor/github.com/aws/aws-lambda-go/events/iot_preprovision_hook.go new file mode 100644 index 0000000..fb11569 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/iot_preprovision_hook.go @@ -0,0 +1,19 @@ +package events + +// IoTPreProvisionHookRequest contains the request parameters for the IoT Pre-Provisioning Hook. +// See https://docs.aws.amazon.com/iot/latest/developerguide/pre-provisioning-hook.html +type IoTPreProvisionHookRequest struct { + ClaimCertificateID string `json:"claimCertificateId"` + CertificateID string `json:"certificateId"` + CertificatePEM string `json:"certificatePem"` + TemplateARN string `json:"templateArn"` + ClientID string `json:"clientId"` + Parameters map[string]string `json:"parameters"` +} + +// IoTPreProvisionHookResponse contains the response parameters for the IoT Pre-Provisioning Hook. +// See https://docs.aws.amazon.com/iot/latest/developerguide/pre-provisioning-hook.html +type IoTPreProvisionHookResponse struct { + AllowProvisioning bool `json:"allowProvisioning"` + ParameterOverrides map[string]string `json:"parameterOverrides"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/kafka.go b/vendor/github.com/aws/aws-lambda-go/events/kafka.go new file mode 100644 index 0000000..abbc03f --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/kafka.go @@ -0,0 +1,50 @@ +// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package events + +import ( + "encoding/json" +) + +type KafkaEvent struct { + EventSource string `json:"eventSource"` + EventSourceARN string `json:"eventSourceArn"` + Records map[string][]KafkaRecord `json:"records"` + BootstrapServers string `json:"bootstrapServers"` +} + +type KafkaRecord struct { + Topic string `json:"topic"` + Partition int64 `json:"partition"` + Offset int64 `json:"offset"` + Timestamp MilliSecondsEpochTime `json:"timestamp"` + TimestampType string `json:"timestampType"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` + Headers []map[string]JSONNumberBytes `json:"headers"` +} + +// JSONNumberBytes represents array of bytes in Headers field. +type JSONNumberBytes []byte + +// MarshalJSON converts byte array into array of signed integers. 
+func (b JSONNumberBytes) MarshalJSON() ([]byte, error) { + signedNumbers := make([]int8, len(b)) + for i, value := range b { + signedNumbers[i] = int8(value) + } + return json.Marshal(signedNumbers) +} + +// UnmarshalJSON converts a given json with potential negative values into byte array. +func (b *JSONNumberBytes) UnmarshalJSON(data []byte) error { + var signedNumbers []int8 + if err := json.Unmarshal(data, &signedNumbers); err != nil { + return err + } + *b = make(JSONNumberBytes, len(signedNumbers)) + for i, value := range signedNumbers { + (*b)[i] = byte(value) + } + return nil +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/kinesis.go b/vendor/github.com/aws/aws-lambda-go/events/kinesis.go index 2b65d76..41bae7a 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/kinesis.go +++ b/vendor/github.com/aws/aws-lambda-go/events/kinesis.go @@ -6,14 +6,27 @@ type KinesisEvent struct { Records []KinesisEventRecord `json:"Records"` } +// KinesisTimeWindowEvent represents an Amazon Dynamodb event when using time windows +// ref. https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows +type KinesisTimeWindowEvent struct { + KinesisEvent + TimeWindowProperties +} + +// KinesisTimeWindowEventResponse is the outer structure to report batch item failures for KinesisTimeWindowEvent. +type KinesisTimeWindowEventResponse struct { + TimeWindowEventResponseProperties + BatchItemFailures []KinesisBatchItemFailure `json:"batchItemFailures"` +} + type KinesisEventRecord struct { - AwsRegion string `json:"awsRegion"` + AwsRegion string `json:"awsRegion"` //nolint: stylecheck EventID string `json:"eventID"` EventName string `json:"eventName"` EventSource string `json:"eventSource"` - EventSourceArn string `json:"eventSourceARN"` + EventSourceArn string `json:"eventSourceARN"` //nolint: stylecheck EventVersion string `json:"eventVersion"` - InvokeIdentityArn string `json:"invokeIdentityArn"` + InvokeIdentityArn string `json:"invokeIdentityArn"` //nolint: stylecheck Kinesis KinesisRecord `json:"kinesis"` } diff --git a/vendor/github.com/aws/aws-lambda-go/events/lambda_function_urls.go b/vendor/github.com/aws/aws-lambda-go/events/lambda_function_urls.go new file mode 100644 index 0000000..52a48e8 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/lambda_function_urls.go @@ -0,0 +1,137 @@ +// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package events + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" +) + +// LambdaFunctionURLRequest contains data coming from the HTTP request to a Lambda Function URL. +type LambdaFunctionURLRequest struct { + Version string `json:"version"` // Version is expected to be `"2.0"` + RawPath string `json:"rawPath"` + RawQueryString string `json:"rawQueryString"` + Cookies []string `json:"cookies,omitempty"` + Headers map[string]string `json:"headers"` + QueryStringParameters map[string]string `json:"queryStringParameters,omitempty"` + RequestContext LambdaFunctionURLRequestContext `json:"requestContext"` + Body string `json:"body,omitempty"` + IsBase64Encoded bool `json:"isBase64Encoded"` +} + +// LambdaFunctionURLRequestContext contains the information to identify the AWS account and resources invoking the Lambda function. 
+type LambdaFunctionURLRequestContext struct { + AccountID string `json:"accountId"` + RequestID string `json:"requestId"` + Authorizer *LambdaFunctionURLRequestContextAuthorizerDescription `json:"authorizer,omitempty"` + APIID string `json:"apiId"` // APIID is the Lambda URL ID + DomainName string `json:"domainName"` // DomainName is of the format `".lambda-url..on.aws"` + DomainPrefix string `json:"domainPrefix"` // DomainPrefix is the Lambda URL ID + Time string `json:"time"` + TimeEpoch int64 `json:"timeEpoch"` + HTTP LambdaFunctionURLRequestContextHTTPDescription `json:"http"` +} + +// LambdaFunctionURLRequestContextAuthorizerDescription contains authorizer information for the request context. +type LambdaFunctionURLRequestContextAuthorizerDescription struct { + IAM *LambdaFunctionURLRequestContextAuthorizerIAMDescription `json:"iam,omitempty"` +} + +// LambdaFunctionURLRequestContextAuthorizerIAMDescription contains IAM information for the request context. +type LambdaFunctionURLRequestContextAuthorizerIAMDescription struct { + AccessKey string `json:"accessKey"` + AccountID string `json:"accountId"` + CallerID string `json:"callerId"` + UserARN string `json:"userArn"` + UserID string `json:"userId"` +} + +// LambdaFunctionURLRequestContextHTTPDescription contains HTTP information for the request context. +type LambdaFunctionURLRequestContextHTTPDescription struct { + Method string `json:"method"` + Path string `json:"path"` + Protocol string `json:"protocol"` + SourceIP string `json:"sourceIp"` + UserAgent string `json:"userAgent"` +} + +// LambdaFunctionURLResponse configures the HTTP response to be returned by Lambda Function URL for the request. +type LambdaFunctionURLResponse struct { + StatusCode int `json:"statusCode"` + Headers map[string]string `json:"headers"` + Body string `json:"body"` + IsBase64Encoded bool `json:"isBase64Encoded"` + Cookies []string `json:"cookies"` +} + +// LambdaFunctionURLStreamingResponse models the response to a Lambda Function URL when InvokeMode is RESPONSE_STREAM. +// If the InvokeMode of the Function URL is BUFFERED (default), use LambdaFunctionURLResponse instead. +// +// Example: +// +// lambda.Start(func() (*events.LambdaFunctionURLStreamingResponse, error) { +// return &events.LambdaFunctionURLStreamingResponse{ +// StatusCode: 200, +// Headers: map[string]string{ +// "Content-Type": "text/html", +// }, +// Body: strings.NewReader("Hello World!"), +// }, nil +// }) +// +// Note: This response type requires compiling with `-tags lambda.norpc`, or choosing the `provided` or `provided.al2` runtime. 
+type LambdaFunctionURLStreamingResponse struct { + prelude *bytes.Buffer + + StatusCode int + Headers map[string]string + Body io.Reader + Cookies []string +} + +func (r *LambdaFunctionURLStreamingResponse) Read(p []byte) (n int, err error) { + if r.prelude == nil { + if r.StatusCode == 0 { + r.StatusCode = http.StatusOK + } + b, err := json.Marshal(struct { + StatusCode int `json:"statusCode"` + Headers map[string]string `json:"headers,omitempty"` + Cookies []string `json:"cookies,omitempty"` + }{ + StatusCode: r.StatusCode, + Headers: r.Headers, + Cookies: r.Cookies, + }) + if err != nil { + return 0, err + } + r.prelude = bytes.NewBuffer(append(b, 0, 0, 0, 0, 0, 0, 0, 0)) + } + if r.prelude.Len() > 0 { + return r.prelude.Read(p) + } + if r.Body == nil { + return 0, io.EOF + } + return r.Body.Read(p) +} + +func (r *LambdaFunctionURLStreamingResponse) Close() error { + if closer, ok := r.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +func (r *LambdaFunctionURLStreamingResponse) MarshalJSON() ([]byte, error) { + return nil, errors.New("not json") +} + +func (r *LambdaFunctionURLStreamingResponse) ContentType() string { + return "application/vnd.awslambda.http-integration-response" +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/lex.go b/vendor/github.com/aws/aws-lambda-go/events/lex.go index 28590a8..c3ca21f 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/lex.go +++ b/vendor/github.com/aws/aws-lambda-go/events/lex.go @@ -1,16 +1,18 @@ package events type LexEvent struct { - MessageVersion string `json:"messageVersion,omitempty"` - InvocationSource string `json:"invocationSource,omitempty"` - UserID string `json:"userId,omitempty"` - InputTranscript string `json:"inputTranscript,omitempty"` - SessionAttributes map[string]string `json:"sessionAttributes,omitempty"` - RequestAttributes map[string]string `json:"requestAttributes,omitempty"` - Bot *LexBot `json:"bot,omitempty"` - OutputDialogMode string `json:"outputDialogMode,omitempty"` - CurrentIntent *LexCurrentIntent `json:"currentIntent,omitempty"` - DialogAction *LexDialogAction `json:"dialogAction,omitempty"` + MessageVersion string `json:"messageVersion,omitempty"` + InvocationSource string `json:"invocationSource,omitempty"` + UserID string `json:"userId,omitempty"` + InputTranscript string `json:"inputTranscript,omitempty"` + SessionAttributes SessionAttributes `json:"sessionAttributes,omitempty"` + RequestAttributes map[string]string `json:"requestAttributes,omitempty"` + Bot *LexBot `json:"bot,omitempty"` + OutputDialogMode string `json:"outputDialogMode,omitempty"` + CurrentIntent *LexCurrentIntent `json:"currentIntent,omitempty"` + AlternativeIntents []LexAlternativeIntents `json:"alternativeIntents,omitempty"` + // Deprecated: the DialogAction field is never populated by Lex events + DialogAction *LexDialogAction `json:"dialogAction,omitempty"` } type LexBot struct { @@ -20,10 +22,19 @@ type LexBot struct { } type LexCurrentIntent struct { - Name string `json:"name,omitempty"` - Slots Slots `json:"slots,omitempty"` - SlotDetails map[string]SlotDetail `json:"slotDetails,omitempty"` - ConfirmationStatus string `json:"confirmationStatus,omitempty"` + Name string `json:"name,omitempty"` + NLUIntentConfidenceScore float64 `json:"nluIntentConfidenceScore,omitempty"` + Slots Slots `json:"slots,omitempty"` + SlotDetails map[string]SlotDetail `json:"slotDetails,omitempty"` + ConfirmationStatus string `json:"confirmationStatus,omitempty"` +} + +type LexAlternativeIntents struct { + Name 
string `json:"name,omitempty"` + NLUIntentConfidenceScore float64 `json:"nluIntentConfidenceScore,omitempty"` + Slots Slots `json:"slots,omitempty"` + SlotDetails map[string]SlotDetail `json:"slotDetails,omitempty"` + ConfirmationStatus string `json:"confirmationStatus,omitempty"` } type SlotDetail struct { @@ -41,8 +52,15 @@ type LexDialogAction struct { ResponseCard *LexResponseCard `json:"responseCard,omitempty"` } +type SessionAttributes map[string]string + type Slots map[string]*string +type LexResponse struct { + SessionAttributes SessionAttributes `json:"sessionAttributes"` + DialogAction LexDialogAction `json:"dialogAction,omitempty"` +} + type LexResponseCard struct { Version int64 `json:"version,omitempty"` ContentType string `json:"contentType,omitempty"` @@ -60,5 +78,4 @@ type Attachment struct { func (h *LexEvent) Clear() { h.Bot = nil h.CurrentIntent = nil - h.DialogAction = nil } diff --git a/vendor/github.com/aws/aws-lambda-go/events/rabbitmq.go b/vendor/github.com/aws/aws-lambda-go/events/rabbitmq.go new file mode 100644 index 0000000..ce79f64 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/rabbitmq.go @@ -0,0 +1,31 @@ +package events + +type RabbitMQEvent struct { + EventSource string `json:"eventSource"` + EventSourceARN string `json:"eventSourceArn"` + MessagesByQueue map[string][]RabbitMQMessage `json:"rmqMessagesByQueue"` +} + +type RabbitMQMessage struct { + BasicProperties RabbitMQBasicProperties `json:"basicProperties"` + Data string `json:"data"` + Redelivered bool `json:"redelivered"` +} + +type RabbitMQBasicProperties struct { + ContentType string `json:"contentType"` + ContentEncoding *string `json:"contentEncoding"` + Headers map[string]interface{} `json:"headers"` // Application or header exchange table + DeliveryMode uint8 `json:"deliveryMode"` + Priority uint8 `json:"priority"` + CorrelationID *string `json:"correlationId"` + ReplyTo *string `json:"replyTo"` + Expiration string `json:"expiration"` + MessageID *string `json:"messageId"` + Timestamp string `json:"timestamp"` + Type *string `json:"type"` + UserID string `json:"userId"` + AppID *string `json:"appId"` + ClusterID *string `json:"clusterId"` + BodySize uint64 `json:"bodySize"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/s3.go b/vendor/github.com/aws/aws-lambda-go/events/s3.go index 5af3493..74c9daa 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/s3.go +++ b/vendor/github.com/aws/aws-lambda-go/events/s3.go @@ -3,13 +3,17 @@ package events import ( + "encoding/json" + "net/url" "time" ) +// S3Event which wrap an array of S3EventRecord type S3Event struct { Records []S3EventRecord `json:"Records"` } +// S3EventRecord which wrap record data type S3EventRecord struct { EventVersion string `json:"eventVersion"` EventSource string `json:"eventSource"` @@ -40,18 +44,32 @@ type S3Entity struct { type S3Bucket struct { Name string `json:"name"` OwnerIdentity S3UserIdentity `json:"ownerIdentity"` - Arn string `json:"arn"` + Arn string `json:"arn"` //nolint: stylecheck } type S3Object struct { Key string `json:"key"` - Size int64 `json:"size"` + Size int64 `json:"size,omitempty"` URLDecodedKey string `json:"urlDecodedKey"` VersionID string `json:"versionId"` ETag string `json:"eTag"` Sequencer string `json:"sequencer"` } +func (o *S3Object) UnmarshalJSON(data []byte) error { + type rawS3Object S3Object + if err := json.Unmarshal(data, (*rawS3Object)(o)); err != nil { + return err + } + key, err := url.QueryUnescape(o.Key) + if err != nil { + return err + } + 
o.URLDecodedKey = key + + return nil +} + type S3TestEvent struct { Service string `json:"Service"` Bucket string `json:"Bucket"` diff --git a/vendor/github.com/aws/aws-lambda-go/events/s3_batch_job.go b/vendor/github.com/aws/aws-lambda-go/events/s3_batch_job.go new file mode 100644 index 0000000..183ec90 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/s3_batch_job.go @@ -0,0 +1,61 @@ +// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package events + +// S3BatchJobEvent encapsulates the detail of a s3 batch job +type S3BatchJobEvent struct { + InvocationSchemaVersion string `json:"invocationSchemaVersion"` + InvocationID string `json:"invocationId"` + Job S3BatchJob `json:"job"` + Tasks []S3BatchJobTask `json:"tasks"` +} + +// S3BatchJob whichs have the job id +type S3BatchJob struct { + ID string `json:"id"` +} + +// S3BatchJobTask represents one task in the s3 batch job and have all task details +type S3BatchJobTask struct { + TaskID string `json:"taskId"` + S3Key string `json:"s3Key"` + S3VersionID string `json:"s3VersionId"` + S3BucketARN string `json:"s3BucketArn"` +} + +// S3BatchJobEventV2 encapsulates the detail of a s3 batch job +type S3BatchJobEventV2 struct { + InvocationSchemaVersion string `json:"invocationSchemaVersion"` + InvocationID string `json:"invocationId"` + Job S3BatchJobV2 `json:"job"` + Tasks []S3BatchJobTaskV2 `json:"tasks"` +} + +// S3BatchJobV2 whichs have the job id +type S3BatchJobV2 struct { + ID string `json:"id"` + UserArguments map[string]string `json:"userArguments"` +} + +// S3BatchJobTaskV2 represents one task in the s3 batch job and have all task details +type S3BatchJobTaskV2 struct { + TaskID string `json:"taskId"` + S3Key string `json:"s3Key"` + S3VersionID string `json:"s3VersionId"` + S3Bucket string `json:"s3Bucket"` +} + +// S3BatchJobResponse is the response of a iven s3 batch job with the results +type S3BatchJobResponse struct { + InvocationSchemaVersion string `json:"invocationSchemaVersion"` + TreatMissingKeysAs string `json:"treatMissingKeysAs"` + InvocationID string `json:"invocationId"` + Results []S3BatchJobResult `json:"results"` +} + +// S3BatchJobResult represents the result of a given task +type S3BatchJobResult struct { + TaskID string `json:"taskId"` + ResultCode string `json:"resultCode"` + ResultString string `json:"resultString"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/s3_object_lambda.go b/vendor/github.com/aws/aws-lambda-go/events/s3_object_lambda.go new file mode 100644 index 0000000..47a28b0 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/s3_object_lambda.go @@ -0,0 +1,64 @@ +package events + +type S3ObjectLambdaEvent struct { + XAmzRequestID string `json:"xAmzRequestId"` + GetObjectContext *S3ObjectLambdaGetObjectContext `json:"getObjectContext,omitempty"` + ListObjectsContext *S3ObjectLambdaListObjectsContext `json:"listObjectsContext,omitempty"` + ListObjectsV2Context *S3ObjectLambdaListObjectsV2Context `json:"listObjectsV2Context,omitempty"` + HeadObjectContext *S3ObjectLambdaHeadObjectContext `json:"headObjectContext,omitempty"` + Configuration S3ObjectLambdaConfiguration `json:"configuration"` + UserRequest S3ObjectLambdaUserRequest `json:"userRequest"` + UserIdentity S3ObjectLambdaUserIdentity `json:"userIdentity"` + ProtocolVersion string `json:"protocolVersion"` +} + +type S3ObjectLambdaGetObjectContext struct { + InputS3URL string `json:"inputS3Url"` + OutputRoute string `json:"outputRoute"` + OutputToken string `json:"outputToken"` +} + 
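// A minimal sketch of an S3 Batch Operations handler built on the S3BatchJobEvent and
// S3BatchJobResponse types added above. The per-task work is elided, and the result-code
// strings ("Succeeded", "PermanentFailure") are the values S3 Batch Operations
// conventionally expects rather than constants defined by this package.

package main

import (
	"context"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"
)

func handleBatchJob(ctx context.Context, e events.S3BatchJobEvent) (events.S3BatchJobResponse, error) {
	results := make([]events.S3BatchJobResult, 0, len(e.Tasks))
	for _, task := range e.Tasks {
		// Real code would operate on task.S3BucketARN / task.S3Key here; this sketch
		// simply reports every task as succeeded.
		results = append(results, events.S3BatchJobResult{
			TaskID:       task.TaskID,
			ResultCode:   "Succeeded",
			ResultString: "ok",
		})
	}
	return events.S3BatchJobResponse{
		InvocationSchemaVersion: e.InvocationSchemaVersion,
		TreatMissingKeysAs:      "PermanentFailure",
		InvocationID:            e.InvocationID,
		Results:                 results,
	}, nil
}

func main() {
	lambda.Start(handleBatchJob)
}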
+type S3ObjectLambdaListObjectsContext struct { + InputS3URL string `json:"inputS3Url"` +} + +type S3ObjectLambdaListObjectsV2Context struct { + InputS3URL string `json:"inputS3Url"` +} + +type S3ObjectLambdaHeadObjectContext struct { + InputS3URL string `json:"inputS3Url"` +} + +type S3ObjectLambdaConfiguration struct { + AccessPointARN string `json:"accessPointArn"` + SupportingAccessPointARN string `json:"supportingAccessPointArn"` + Payload string `json:"payload"` +} + +type S3ObjectLambdaUserRequest struct { + URL string `json:"url"` + Headers map[string]string `json:"headers"` +} + +type S3ObjectLambdaUserIdentity struct { + Type string `json:"type"` + PrincipalID string `json:"principalId"` + ARN string `json:"arn"` + AccountID string `json:"accountId"` + AccessKeyID string `json:"accessKeyId"` + SessionContext *S3ObjectLambdaSessionContext `json:"sessionContext,omitempty"` +} + +type S3ObjectLambdaSessionContext struct { + Attributes map[string]string `json:"attributes"` + SessionIssuer *S3ObjectLambdaSessionIssuer `json:"sessionIssuer,omitempty"` +} + +type S3ObjectLambdaSessionIssuer struct { + Type string `json:"type"` + PrincipalID string `json:"principalId"` + ARN string `json:"arn"` + AccountID string `json:"accountId"` + UserName string `json:"userName"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/secretsmanager.go b/vendor/github.com/aws/aws-lambda-go/events/secretsmanager.go new file mode 100644 index 0000000..fd2bea1 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/secretsmanager.go @@ -0,0 +1,11 @@ +package events + +// SecretsManagerSecretRotationEvent is the event passed to a Lambda function to handle +// automatic secret rotation. +// +// https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html#rotate-secrets_how +type SecretsManagerSecretRotationEvent struct { + Step string `json:"Step"` + SecretID string `json:"SecretId"` + ClientRequestToken string `json:"ClientRequestToken"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/ses.go b/vendor/github.com/aws/aws-lambda-go/events/ses.go index 793b448..ff89985 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/ses.go +++ b/vendor/github.com/aws/aws-lambda-go/events/ses.go @@ -55,10 +55,22 @@ type SimpleEmailCommonHeaders struct { Subject string `json:"subject"` } +// SimpleEmailReceiptAction is a logical union of fields present in all action +// Types. For example, the FunctionARN and InvocationType fields are only +// present for the Lambda Type, and the BucketName and ObjectKey fields are only +// present for the S3 Type. 
type SimpleEmailReceiptAction struct { - Type string `json:"type"` - InvocationType string `json:"invocationType"` - FunctionArn string `json:"functionArn"` + Type string `json:"type"` + TopicARN string `json:"topicArn,omitempty"` + BucketName string `json:"bucketName,omitempty"` + ObjectKey string `json:"objectKey,omitempty"` + SMTPReplyCode string `json:"smtpReplyCode,omitempty"` + StatusCode string `json:"statusCode,omitempty"` + Message string `json:"message,omitempty"` + Sender string `json:"sender,omitempty"` + InvocationType string `json:"invocationType,omitempty"` + FunctionARN string `json:"functionArn,omitempty"` + OrganizationARN string `json:"organizationArn,omitempty"` } type SimpleEmailVerdict struct { diff --git a/vendor/github.com/aws/aws-lambda-go/events/sns.go b/vendor/github.com/aws/aws-lambda-go/events/sns.go index 9b00bd2..6c75df7 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/sns.go +++ b/vendor/github.com/aws/aws-lambda-go/events/sns.go @@ -12,7 +12,7 @@ type SNSEvent struct { type SNSEventRecord struct { EventVersion string `json:"EventVersion"` - EventSubscriptionArn string `json:"EventSubscriptionArn"` + EventSubscriptionArn string `json:"EventSubscriptionArn"` //nolint: stylecheck EventSource string `json:"EventSource"` SNS SNSEntity `json:"Sns"` } @@ -21,7 +21,7 @@ type SNSEntity struct { Signature string `json:"Signature"` MessageID string `json:"MessageId"` Type string `json:"Type"` - TopicArn string `json:"TopicArn"` + TopicArn string `json:"TopicArn"` //nolint: stylecheck MessageAttributes map[string]interface{} `json:"MessageAttributes"` SignatureVersion string `json:"SignatureVersion"` Timestamp time.Time `json:"Timestamp"` @@ -30,3 +30,59 @@ type SNSEntity struct { UnsubscribeURL string `json:"UnsubscribeUrl"` Subject string `json:"Subject"` } + +type CloudWatchAlarmSNSPayload struct { + AlarmName string `json:"AlarmName"` + AlarmDescription string `json:"AlarmDescription"` + AWSAccountID string `json:"AWSAccountId"` + NewStateValue string `json:"NewStateValue"` + NewStateReason string `json:"NewStateReason"` + StateChangeTime string `json:"StateChangeTime"` + Region string `json:"Region"` + AlarmARN string `json:"AlarmArn"` + OldStateValue string `json:"OldStateValue"` + Trigger CloudWatchAlarmTrigger `json:"Trigger"` +} + +type CloudWatchAlarmTrigger struct { + Period int64 `json:"Period"` + EvaluationPeriods int64 `json:"EvaluationPeriods"` + ComparisonOperator string `json:"ComparisonOperator"` + Threshold float64 `json:"Threshold"` + TreatMissingData string `json:"TreatMissingData"` + EvaluateLowSampleCountPercentile string `json:"EvaluateLowSampleCountPercentile"` + Metrics []CloudWatchMetricDataQuery `json:"Metrics,omitempty"` + MetricName string `json:"MetricName,omitempty"` + Namespace string `json:"Namespace,omitempty"` + StatisticType string `json:"StatisticType,omitempty"` + Statistic string `json:"Statistic,omitempty"` + Unit string `json:"Unit,omitempty"` + Dimensions []CloudWatchDimension `json:"Dimensions,omitempty"` +} + +type CloudWatchMetricDataQuery struct { + Expression string `json:"Expression,omitempty"` + ID string `json:"Id"` + Label string `json:"Label,omitempty"` + MetricStat CloudWatchMetricStat `json:"MetricStat,omitempty"` + Period int64 `json:"Period,omitempty"` + ReturnData bool `json:"ReturnData,omitempty"` +} + +type CloudWatchMetricStat struct { + Metric CloudWatchMetric `json:"Metric"` + Period int64 `json:"Period"` + Stat string `json:"Stat"` + Unit string `json:"Unit,omitempty"` +} + +type CloudWatchMetric 
struct { + Dimensions []CloudWatchDimension `json:"Dimensions,omitempty"` + MetricName string `json:"MetricName,omitempty"` + Namespace string `json:"Namespace,omitempty"` +} + +type CloudWatchDimension struct { + Name string `json:"name"` + Value string `json:"value"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/sqs.go b/vendor/github.com/aws/aws-lambda-go/events/sqs.go index ea1bf7b..1071dfe 100644 --- a/vendor/github.com/aws/aws-lambda-go/events/sqs.go +++ b/vendor/github.com/aws/aws-lambda-go/events/sqs.go @@ -7,7 +7,7 @@ type SQSEvent struct { } type SQSMessage struct { - MessageId string `json:"messageId"` + MessageId string `json:"messageId"` //nolint: stylecheck ReceiptHandle string `json:"receiptHandle"` Body string `json:"body"` Md5OfBody string `json:"md5OfBody"` diff --git a/vendor/github.com/aws/aws-lambda-go/events/streams.go b/vendor/github.com/aws/aws-lambda-go/events/streams.go new file mode 100644 index 0000000..804b66b --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/streams.go @@ -0,0 +1,31 @@ +package events + +// KinesisEventResponse is the outer structure to report batch item failures for KinesisEvent. +type KinesisEventResponse struct { + BatchItemFailures []KinesisBatchItemFailure `json:"batchItemFailures"` +} + +// KinesisBatchItemFailure is the individual record which failed processing. +type KinesisBatchItemFailure struct { + ItemIdentifier string `json:"itemIdentifier"` +} + +// DynamoDBEventResponse is the outer structure to report batch item failures for DynamoDBEvent. +type DynamoDBEventResponse struct { + BatchItemFailures []DynamoDBBatchItemFailure `json:"batchItemFailures"` +} + +// DynamoDBBatchItemFailure is the individual record which failed processing. +type DynamoDBBatchItemFailure struct { + ItemIdentifier string `json:"itemIdentifier"` +} + +// SQSEventResponse is the outer structure to report batch item failures for SQSEvent. +type SQSEventResponse struct { + BatchItemFailures []SQSBatchItemFailure `json:"batchItemFailures"` +} + +// SQSBatchItemFailure is the individual record which failed processing. +type SQSBatchItemFailure struct { + ItemIdentifier string `json:"itemIdentifier"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/events/time_window.go b/vendor/github.com/aws/aws-lambda-go/events/time_window.go new file mode 100644 index 0000000..c98ea97 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/events/time_window.go @@ -0,0 +1,42 @@ +package events + +// Window is the object that captures the time window for the records in the event when using the tumbling windows feature +// Kinesis: https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows +// DDB: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-windows +type Window struct { + Start RFC3339EpochTime `json:"start"` + End RFC3339EpochTime `json:"end"` +} + +// TimeWindowProperties is the object that captures properties that relate to the tumbling windows feature +// Kinesis: https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows +// DDB: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-windows +type TimeWindowProperties struct { + // Time window for the records in the event. + Window Window `json:"window"` + + // State being built up to this invoke in the time window. + State map[string]string `json:"state"` + + // Shard id of the records + ShardID string `json:"shardId"` + + // The event source ARN of the service that generated the event (eg. 
DynamoDB or Kinesis) + EventSourceARN string `json:"eventSourceARN"` + + // Set to true for the last invoke of the time window. + // Subsequent invoke will start a new time window along with a fresh state. + IsFinalInvokeForWindow bool `json:"isFinalInvokeForWindow"` + + // Set to true if window is terminated prematurely. + // Subsequent invoke will continue the same window with a fresh state. + IsWindowTerminatedEarly bool `json:"isWindowTerminatedEarly"` +} + +// TimeWindowEventResponseProperties is the object that captures response properties that relate to the tumbling windows feature +// Kinesis: https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows +// DDB: https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-windows +type TimeWindowEventResponseProperties struct { + // State being built up to this invoke in the time window. + State map[string]string `json:"state"` +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/README.md b/vendor/github.com/aws/aws-lambda-go/lambda/README.md index 76f8679..dcaca6d 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambda/README.md +++ b/vendor/github.com/aws/aws-lambda-go/lambda/README.md @@ -1,3 +1,3 @@ # Overview -[![GoDoc](https://godoc.org/github.com/aws/aws-lambda-go/lambda?status.svg)](https://godoc.org/github.com/aws/aws-lambda-go/lambda) +[![Go Reference](https://pkg.go.dev/badge/github.com/aws/aws-lambda-go/lambda.svg)](https://pkg.go.dev/github.com/aws/aws-lambda-go/lambda) diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/entry.go b/vendor/github.com/aws/aws-lambda-go/lambda/entry.go index f55e61a..da82fab 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambda/entry.go +++ b/vendor/github.com/aws/aws-lambda-go/lambda/entry.go @@ -3,9 +3,8 @@ package lambda import ( + "context" "log" - "net" - "net/rpc" "os" ) @@ -15,30 +14,42 @@ import ( // // Rules: // -// * handler must be a function -// * handler may take between 0 and two arguments. -// * if there are two arguments, the first argument must satisfy the "context.Context" interface. -// * handler may return between 0 and two arguments. -// * if there are two return values, the second argument must be an error. -// * if there is one return value it must be an error. +// - handler must be a function +// - handler may take between 0 and two arguments. +// - if there are two arguments, the first argument must satisfy the "context.Context" interface. +// - handler may return between 0 and two values. +// - if there are two return values, the second return value must be an error. +// - if there is one return value it must be an error. // // Valid function signatures: // -// func () -// func () error -// func (TIn) error -// func () (TOut, error) -// func (TIn) (TOut, error) -// func (context.Context) error -// func (context.Context, TIn) error -// func (context.Context) (TOut, error) -// func (context.Context, TIn) (TOut, error) +// func () +// func (TIn) +// func () error +// func (TIn) error +// func () (TOut, error) +// func (TIn) (TOut, error) +// func (context.Context) +// func (context.Context) error +// func (context.Context) (TOut, error) +// func (context.Context, TIn) +// func (context.Context, TIn) error +// func (context.Context, TIn) (TOut, error) // // Where "TIn" and "TOut" are types compatible with the "encoding/json" standard library. // See https://golang.org/pkg/encoding/json/#Unmarshal for how deserialization behaves +// +// "TOut" may also implement the io.Reader interface. 
+// If "TOut" is both json serializable and implements io.Reader, then the json serialization is used. func Start(handler interface{}) { - wrappedHandler := NewHandler(handler) - StartHandler(wrappedHandler) + StartWithOptions(handler) +} + +// StartWithContext is the same as Start except sets the base context for the function. +// +// Deprecated: use lambda.StartWithOptions(handler, lambda.WithContext(ctx)) instead +func StartWithContext(ctx context.Context, handler interface{}) { + StartWithOptions(handler, WithContext(ctx)) } // StartHandler takes in a Handler wrapper interface which can be implemented either by a @@ -46,19 +57,57 @@ func Start(handler interface{}) { // // Handler implementation requires a single "Invoke()" function: // -// func Invoke(context.Context, []byte) ([]byte, error) +// func Invoke(context.Context, []byte) ([]byte, error) +// +// Deprecated: use lambda.Start(handler) instead func StartHandler(handler Handler) { - port := os.Getenv("_LAMBDA_SERVER_PORT") - lis, err := net.Listen("tcp", "localhost:"+port) - if err != nil { - log.Fatal(err) + StartWithOptions(handler) +} + +// StartWithOptions is the same as Start after the application of any handler options specified +func StartWithOptions(handler interface{}, options ...Option) { + start(newHandler(handler, options...)) +} + +type startFunction struct { + env string + f func(envValue string, handler Handler) error +} + +var ( + runtimeAPIStartFunction = &startFunction{ + env: "AWS_LAMBDA_RUNTIME_API", + f: startRuntimeAPILoop, } - function := new(Function) - function.handler = handler - err = rpc.Register(function) - if err != nil { - log.Fatal("failed to register handler function") + startFunctions = []*startFunction{runtimeAPIStartFunction} + + // This allows end to end testing of the Start functions, by tests overwriting this function to keep the program alive + logFatalf = log.Fatalf +) + +// StartHandlerWithContext is the same as StartHandler except sets the base context for the function. +// +// Handler implementation requires a single "Invoke()" function: +// +// func Invoke(context.Context, []byte) ([]byte, error) +// +// Deprecated: use lambda.StartWithOptions(handler, lambda.WithContext(ctx)) instead +func StartHandlerWithContext(ctx context.Context, handler Handler) { + StartWithOptions(handler, WithContext(ctx)) +} + +func start(handler *handlerOptions) { + var keys []string + for _, start := range startFunctions { + config := os.Getenv(start.env) + if config != "" { + // in normal operation, the start function never returns + // if it does, exit!, this triggers a restart of the lambda function + err := start.f(config, handler) + logFatalf("%v", err) + } + keys = append(keys, start.env) } - rpc.Accept(lis) - log.Fatal("accept should not have returned") + logFatalf("expected AWS Lambda environment variables %s are not defined", keys) + } diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/entry_generic.go b/vendor/github.com/aws/aws-lambda-go/lambda/entry_generic.go new file mode 100644 index 0000000..2970838 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/entry_generic.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2022 Amazon.com, Inc. or its affiliates. 
All Rights Reserved + +package lambda + +import ( + "context" +) + +// HandlerFunc represents a valid input with two arguments and two returns as described by Start +type HandlerFunc[TIn, TOut any] interface { + func(context.Context, TIn) (TOut, error) +} + +// StartHandlerFunc is the same as StartWithOptions except that it takes a generic input +// so that the function signature can be validated at compile time. +func StartHandlerFunc[TIn any, TOut any, H HandlerFunc[TIn, TOut]](handler H, options ...Option) { + start(newHandler(handler, options...)) +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/errors.go b/vendor/github.com/aws/aws-lambda-go/lambda/errors.go new file mode 100644 index 0000000..5d482d0 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/errors.go @@ -0,0 +1,46 @@ +// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved + +package lambda + +import ( + "reflect" + + "github.com/aws/aws-lambda-go/lambda/messages" +) + +func getErrorType(err interface{}) string { + errorType := reflect.TypeOf(err) + if errorType.Kind() == reflect.Ptr { + return errorType.Elem().Name() + } + return errorType.Name() +} + +func lambdaErrorResponse(invokeError error) *messages.InvokeResponse_Error { + if ive, ok := invokeError.(messages.InvokeResponse_Error); ok { + return &ive + } + var errorName string + if errorType := reflect.TypeOf(invokeError); errorType.Kind() == reflect.Ptr { + errorName = errorType.Elem().Name() + } else { + errorName = errorType.Name() + } + return &messages.InvokeResponse_Error{ + Message: invokeError.Error(), + Type: errorName, + } +} + +func lambdaPanicResponse(err interface{}) *messages.InvokeResponse_Error { + if ive, ok := err.(messages.InvokeResponse_Error); ok { + return &ive + } + panicInfo := getPanicInfo(err) + return &messages.InvokeResponse_Error{ + Message: panicInfo.Message, + Type: getErrorType(err), + StackTrace: panicInfo.StackTrace, + ShouldExit: true, + } +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/extensions_api_client.go b/vendor/github.com/aws/aws-lambda-go/lambda/extensions_api_client.go new file mode 100644 index 0000000..c970389 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/extensions_api_client.go @@ -0,0 +1,90 @@ +package lambda + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" //nolint: staticcheck + "net/http" +) + +const ( + headerExtensionName = "Lambda-Extension-Name" + headerExtensionIdentifier = "Lambda-Extension-Identifier" + extensionAPIVersion = "2020-01-01" +) + +type extensionAPIEventType string + +const ( + extensionInvokeEvent extensionAPIEventType = "INVOKE" //nolint:deadcode,unused,varcheck + extensionShutdownEvent extensionAPIEventType = "SHUTDOWN" //nolint:deadcode,unused,varcheck +) + +type extensionAPIClient struct { + baseURL string + httpClient *http.Client +} + +func newExtensionAPIClient(address string) *extensionAPIClient { + client := &http.Client{ + Timeout: 0, // connections to the extensions API are never expected to time out + } + endpoint := "http://" + address + "/" + extensionAPIVersion + "/extension/" + return &extensionAPIClient{ + baseURL: endpoint, + httpClient: client, + } +} + +func (c *extensionAPIClient) register(name string, events ...extensionAPIEventType) (string, error) { + url := c.baseURL + "register" + body, _ := json.Marshal(struct { + Events []extensionAPIEventType `json:"events"` + }{ + Events: events, + }) + + req, _ := http.NewRequest(http.MethodPost, url, bytes.NewReader(body)) + 
req.Header.Add(headerExtensionName, name) + res, err := c.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to register extension: %v", err) + } + defer res.Body.Close() + _, _ = io.Copy(ioutil.Discard, res.Body) + + if res.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to register extension, got response status: %d %s", res.StatusCode, http.StatusText(res.StatusCode)) + } + + return res.Header.Get(headerExtensionIdentifier), nil +} + +type extensionEventResponse struct { + EventType extensionAPIEventType + // ... the rest not implemented +} + +func (c *extensionAPIClient) next(id string) (response extensionEventResponse, err error) { + url := c.baseURL + "event/next" + + req, _ := http.NewRequest(http.MethodGet, url, nil) + req.Header.Add(headerExtensionIdentifier, id) + res, err := c.httpClient.Do(req) + if err != nil { + err = fmt.Errorf("failed to get extension event: %v", err) + return + } + defer res.Body.Close() + _, _ = io.Copy(ioutil.Discard, res.Body) + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("failed to register extension, got response status: %d %s", res.StatusCode, http.StatusText(res.StatusCode)) + return + } + + err = json.NewDecoder(res.Body).Decode(&response) + return +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/function.go b/vendor/github.com/aws/aws-lambda-go/lambda/function.go deleted file mode 100644 index 6a2e337..0000000 --- a/vendor/github.com/aws/aws-lambda-go/lambda/function.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - -package lambda - -import ( - "context" - "encoding/json" - "reflect" - "time" - - "github.com/aws/aws-lambda-go/lambda/messages" - "github.com/aws/aws-lambda-go/lambdacontext" -) - -type Function struct { - handler Handler -} - -func (fn *Function) Ping(req *messages.PingRequest, response *messages.PingResponse) error { - *response = messages.PingResponse{} - return nil -} - -func (fn *Function) Invoke(req *messages.InvokeRequest, response *messages.InvokeResponse) error { - defer func() { - if err := recover(); err != nil { - panicInfo := getPanicInfo(err) - response.Error = &messages.InvokeResponse_Error{ - Message: panicInfo.Message, - Type: getErrorType(err), - StackTrace: panicInfo.StackTrace, - ShouldExit: true, - } - } - }() - - deadline := time.Unix(req.Deadline.Seconds, req.Deadline.Nanos).UTC() - invokeContext, cancel := context.WithDeadline(context.Background(), deadline) - defer cancel() - - lc := &lambdacontext.LambdaContext{ - AwsRequestID: req.RequestId, - InvokedFunctionArn: req.InvokedFunctionArn, - Identity: lambdacontext.CognitoIdentity{ - CognitoIdentityID: req.CognitoIdentityId, - CognitoIdentityPoolID: req.CognitoIdentityPoolId, - }, - } - if len(req.ClientContext) > 0 { - if err := json.Unmarshal(req.ClientContext, &lc.ClientContext); err != nil { - response.Error = lambdaErrorResponse(err) - return nil - } - } - invokeContext = lambdacontext.NewContext(invokeContext, lc) - - invokeContext = context.WithValue(invokeContext, "x-amzn-trace-id", req.XAmznTraceId) - - payload, err := fn.handler.Invoke(invokeContext, req.Payload) - if err != nil { - response.Error = lambdaErrorResponse(err) - return nil - } - response.Payload = payload - return nil -} - -func getErrorType(err interface{}) string { - errorType := reflect.TypeOf(err) - if errorType.Kind() == reflect.Ptr { - return errorType.Elem().Name() - } - return errorType.Name() -} - -func lambdaErrorResponse(invokeError error) 
*messages.InvokeResponse_Error { - var errorName string - if errorType := reflect.TypeOf(invokeError); errorType.Kind() == reflect.Ptr { - errorName = errorType.Elem().Name() - } else { - errorName = errorType.Name() - } - return &messages.InvokeResponse_Error{ - Message: invokeError.Error(), - Type: errorName, - } -} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/handler.go b/vendor/github.com/aws/aws-lambda-go/lambda/handler.go index 96eeac2..c455656 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambda/handler.go +++ b/vendor/github.com/aws/aws-lambda-go/lambda/handler.go @@ -3,10 +3,15 @@ package lambda import ( + "bytes" "context" "encoding/json" + "errors" "fmt" + "io" + "io/ioutil" // nolint:staticcheck "reflect" + "strings" "github.com/aws/aws-lambda-go/lambda/handlertrace" ) @@ -15,79 +20,296 @@ type Handler interface { Invoke(ctx context.Context, payload []byte) ([]byte, error) } -// lambdaHandler is the generic function type -type lambdaHandler func(context.Context, []byte) (interface{}, error) +type handlerOptions struct { + handlerFunc + baseContext context.Context + contextValues map[interface{}]interface{} + jsonRequestUseNumber bool + jsonRequestDisallowUnknownFields bool + jsonResponseEscapeHTML bool + jsonResponseIndentPrefix string + jsonResponseIndentValue string + enableSIGTERM bool + sigtermCallbacks []func() +} -// Invoke calls the handler, and serializes the response. -// If the underlying handler returned an error, or an error occurs during serialization, error is returned. -func (handler lambdaHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) { - response, err := handler(ctx, payload) - if err != nil { - return nil, err - } +type Option func(*handlerOptions) - responseBytes, err := json.Marshal(response) - if err != nil { - return nil, err - } +// WithContext is a HandlerOption that sets the base context for all invocations of the handler. +// +// Usage: +// +// lambda.StartWithOptions( +// func (ctx context.Context) (string, error) { +// return ctx.Value("foo"), nil +// }, +// lambda.WithContext(context.WithValue(context.Background(), "foo", "bar")) +// ) +func WithContext(ctx context.Context) Option { + return Option(func(h *handlerOptions) { + h.baseContext = ctx + }) +} - return responseBytes, nil +// WithContextValue adds a value to the handler context. +// If a base context was set using WithContext, that base is used as the parent. 
+// +// Usage: +// +// lambda.StartWithOptions( +// func (ctx context.Context) (string, error) { +// return ctx.Value("foo"), nil +// }, +// lambda.WithContextValue("foo", "bar") +// ) +func WithContextValue(key interface{}, value interface{}) Option { + return Option(func(h *handlerOptions) { + h.contextValues[key] = value + }) } -func errorHandler(e error) lambdaHandler { - return func(ctx context.Context, event []byte) (interface{}, error) { - return nil, e - } +// WithSetEscapeHTML sets the SetEscapeHTML argument on the underlying json encoder +// +// Usage: +// +// lambda.StartWithOptions( +// func () (string, error) { +// return "hello!>", nil +// }, +// lambda.WithSetEscapeHTML(true), +// ) +func WithSetEscapeHTML(escapeHTML bool) Option { + return Option(func(h *handlerOptions) { + h.jsonResponseEscapeHTML = escapeHTML + }) +} + +// WithSetIndent sets the SetIndent argument on the underling json encoder +// +// Usage: +// +// lambda.StartWithOptions( +// func (event any) (any, error) { +// return event, nil +// }, +// lambda.WithSetIndent(">"," "), +// ) +func WithSetIndent(prefix, indent string) Option { + return Option(func(h *handlerOptions) { + h.jsonResponseIndentPrefix = prefix + h.jsonResponseIndentValue = indent + }) +} + +// WithUseNumber sets the UseNumber option on the underlying json decoder +// +// Usage: +// +// lambda.StartWithOptions( +// func (event any) (any, error) { +// return event, nil +// }, +// lambda.WithUseNumber(true) +// ) +func WithUseNumber(useNumber bool) Option { + return Option(func(h *handlerOptions) { + h.jsonRequestUseNumber = useNumber + }) +} + +// WithUseNumber sets the DisallowUnknownFields option on the underlying json decoder +// +// Usage: +// +// lambda.StartWithOptions( +// func (event any) (any, error) { +// return event, nil +// }, +// lambda.WithDisallowUnknownFields(true) +// ) +func WithDisallowUnknownFields(disallowUnknownFields bool) Option { + return Option(func(h *handlerOptions) { + h.jsonRequestDisallowUnknownFields = disallowUnknownFields + }) } -func validateArguments(handler reflect.Type) (bool, error) { - handlerTakesContext := false - if handler.NumIn() > 2 { - return false, fmt.Errorf("handlers may not take more than two arguments, but handler takes %d", handler.NumIn()) - } else if handler.NumIn() > 0 { +// WithEnableSIGTERM enables SIGTERM behavior within the Lambda platform on container spindown. +// SIGKILL will occur ~500ms after SIGTERM. +// Optionally, an array of callback functions to run on SIGTERM may be provided. +// +// Usage: +// +// lambda.StartWithOptions( +// func (event any) (any, error) { +// return event, nil +// }, +// lambda.WithEnableSIGTERM(func() { +// log.Print("function container shutting down...") +// }) +// ) +func WithEnableSIGTERM(callbacks ...func()) Option { + return Option(func(h *handlerOptions) { + h.sigtermCallbacks = append(h.sigtermCallbacks, callbacks...) + h.enableSIGTERM = true + }) +} + +// handlerTakesContext returns whether the handler takes a context.Context as its first argument. +func handlerTakesContext(handler reflect.Type) (bool, error) { + switch handler.NumIn() { + case 0: + return false, nil + case 1: contextType := reflect.TypeOf((*context.Context)(nil)).Elem() argumentType := handler.In(0) - handlerTakesContext = argumentType.Implements(contextType) - if handler.NumIn() > 1 && !handlerTakesContext { + if argumentType.Kind() != reflect.Interface { + return false, nil + } + + // handlers like func(event any) are valid. 
+ if argumentType.NumMethod() == 0 { + return false, nil + } + + if !contextType.Implements(argumentType) || !argumentType.Implements(contextType) { + return false, fmt.Errorf("handler takes an interface, but it is not context.Context: %q", argumentType.Name()) + } + return true, nil + case 2: + contextType := reflect.TypeOf((*context.Context)(nil)).Elem() + argumentType := handler.In(0) + if argumentType.Kind() != reflect.Interface || !contextType.Implements(argumentType) || !argumentType.Implements(contextType) { return false, fmt.Errorf("handler takes two arguments, but the first is not Context. got %s", argumentType.Kind()) } + return true, nil } - - return handlerTakesContext, nil + return false, fmt.Errorf("handlers may not take more than two arguments, but handler takes %d", handler.NumIn()) } func validateReturns(handler reflect.Type) error { errorType := reflect.TypeOf((*error)(nil)).Elem() - if handler.NumOut() > 2 { + + switch n := handler.NumOut(); { + case n > 2: return fmt.Errorf("handler may not return more than two values") - } else if handler.NumOut() > 1 { + case n > 1: if !handler.Out(1).Implements(errorType) { return fmt.Errorf("handler returns two values, but the second does not implement error") } - } else if handler.NumOut() == 1 { + case n == 1: if !handler.Out(0).Implements(errorType) { return fmt.Errorf("handler returns a single value, but it does not implement error") } } + return nil } // NewHandler creates a base lambda handler from the given handler function. The -// returned Handler performs JSON deserialization and deserialization, and -// delegates to the input handler function. The handler function parameter must -// satisfy the rules documented by Start. If handlerFunc is not a valid +// returned Handler performs JSON serialization and deserialization, and +// delegates to the input handler function. The handler function parameter must +// satisfy the rules documented by Start. If handlerFunc is not a valid // handler, the returned Handler simply reports the validation error. func NewHandler(handlerFunc interface{}) Handler { - if handlerFunc == nil { - return errorHandler(fmt.Errorf("handler is nil")) + return NewHandlerWithOptions(handlerFunc) +} + +// NewHandlerWithOptions creates a base lambda handler from the given handler function. The +// returned Handler performs JSON serialization and deserialization, and +// delegates to the input handler function. The handler function parameter must +// satisfy the rules documented by Start. If handlerFunc is not a valid +// handler, the returned Handler simply reports the validation error. +func NewHandlerWithOptions(handlerFunc interface{}, options ...Option) Handler { + return newHandler(handlerFunc, options...) 
+} + +func newHandler(handlerFunc interface{}, options ...Option) *handlerOptions { + if h, ok := handlerFunc.(*handlerOptions); ok { + return h + } + h := &handlerOptions{ + baseContext: context.Background(), + contextValues: map[interface{}]interface{}{}, + jsonResponseEscapeHTML: false, + jsonResponseIndentPrefix: "", + jsonResponseIndentValue: "", + } + for _, option := range options { + option(h) + } + for k, v := range h.contextValues { + h.baseContext = context.WithValue(h.baseContext, k, v) } - handler := reflect.ValueOf(handlerFunc) - handlerType := reflect.TypeOf(handlerFunc) + if h.enableSIGTERM { + enableSIGTERM(h.sigtermCallbacks) + } + h.handlerFunc = reflectHandler(handlerFunc, h) + return h +} + +type handlerFunc func(context.Context, []byte) (io.Reader, error) + +// back-compat for the rpc mode +func (h handlerFunc) Invoke(ctx context.Context, payload []byte) ([]byte, error) { + response, err := h(ctx, payload) + if err != nil { + return nil, err + } + // if the response needs to be closed (ex: net.Conn, os.File), ensure it's closed before the next invoke to prevent a resource leak + if response, ok := response.(io.Closer); ok { + defer response.Close() + } + // optimization: if the response is a *bytes.Buffer, a copy can be eliminated + switch response := response.(type) { + case *jsonOutBuffer: + return response.Bytes(), nil + case *bytes.Buffer: + return response.Bytes(), nil + } + b, err := ioutil.ReadAll(response) + if err != nil { + return nil, err + } + return b, nil +} + +func errorHandler(err error) handlerFunc { + return func(_ context.Context, _ []byte) (io.Reader, error) { + return nil, err + } +} + +type jsonOutBuffer struct { + *bytes.Buffer +} + +func (j *jsonOutBuffer) ContentType() string { + return contentTypeJSON +} + +func reflectHandler(f interface{}, h *handlerOptions) handlerFunc { + if f == nil { + return errorHandler(errors.New("handler is nil")) + } + + // back-compat: types with reciever `Invoke(context.Context, []byte) ([]byte, error)` need the return bytes wrapped + if handler, ok := f.(Handler); ok { + return func(ctx context.Context, payload []byte) (io.Reader, error) { + b, err := handler.Invoke(ctx, payload) + if err != nil { + return nil, err + } + return bytes.NewBuffer(b), nil + } + } + + handler := reflect.ValueOf(f) + handlerType := reflect.TypeOf(f) if handlerType.Kind() != reflect.Func { return errorHandler(fmt.Errorf("handler kind %s is not %s", handlerType.Kind(), reflect.Func)) } - takesContext, err := validateArguments(handlerType) + takesContext, err := handlerTakesContext(handlerType) if err != nil { return errorHandler(err) } @@ -96,7 +318,20 @@ func NewHandler(handlerFunc interface{}) Handler { return errorHandler(err) } - return lambdaHandler(func(ctx context.Context, payload []byte) (interface{}, error) { + out := &jsonOutBuffer{bytes.NewBuffer(nil)} + return func(ctx context.Context, payload []byte) (io.Reader, error) { + out.Reset() + in := bytes.NewBuffer(payload) + decoder := json.NewDecoder(in) + if h.jsonRequestUseNumber { + decoder.UseNumber() + } + if h.jsonRequestDisallowUnknownFields { + decoder.DisallowUnknownFields() + } + encoder := json.NewEncoder(out) + encoder.SetEscapeHTML(h.jsonResponseEscapeHTML) + encoder.SetIndent(h.jsonResponseIndentPrefix, h.jsonResponseIndentValue) trace := handlertrace.FromContext(ctx) @@ -108,8 +343,7 @@ func NewHandler(handlerFunc interface{}) Handler { if (handlerType.NumIn() == 1 && !takesContext) || handlerType.NumIn() == 2 { eventType := handlerType.In(handlerType.NumIn() - 1) 
event := reflect.New(eventType) - - if err := json.Unmarshal(payload, event.Interface()); err != nil { + if err := decoder.Decode(event.Interface()); err != nil { return nil, err } if nil != trace.RequestEvent { @@ -120,22 +354,42 @@ func NewHandler(handlerFunc interface{}) Handler { response := handler.Call(args) - // convert return values into (interface{}, error) - var err error + // return the error, if any if len(response) > 0 { - if errVal, ok := response[len(response)-1].Interface().(error); ok { - err = errVal + if errVal, ok := response[len(response)-1].Interface().(error); ok && errVal != nil { + return nil, errVal } } + // set the response value, if any var val interface{} if len(response) > 1 { val = response[0].Interface() - if nil != trace.ResponseEvent { trace.ResponseEvent(ctx, val) } } - return val, err - }) + // encode to JSON + if err := encoder.Encode(val); err != nil { + // if response is not JSON serializable, but the response type is a reader, return it as-is + if reader, ok := val.(io.Reader); ok { + return reader, nil + } + return nil, err + } + + // if response value is an io.Reader, return it as-is + if reader, ok := val.(io.Reader); ok { + // back-compat, don't return the reader if the value serialized to a non-empty json + if strings.HasPrefix(out.String(), "{}") { + return reader, nil + } + } + + // back-compat, strip the encoder's trailing newline unless WithSetIndent was used + if h.jsonResponseIndentValue == "" && h.jsonResponseIndentPrefix == "" { + out.Truncate(out.Len() - 1) + } + return out, nil + } } diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/invoke_loop.go b/vendor/github.com/aws/aws-lambda-go/lambda/invoke_loop.go new file mode 100644 index 0000000..338237e --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/invoke_loop.go @@ -0,0 +1,212 @@ +// Copyright 2020 Amazon.com, Inc. or its affiliates. 
All Rights Reserved + +package lambda + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "os" + "strconv" + "time" + + "github.com/aws/aws-lambda-go/lambda/messages" + "github.com/aws/aws-lambda-go/lambdacontext" +) + +const ( + msPerS = int64(time.Second / time.Millisecond) + nsPerMS = int64(time.Millisecond / time.Nanosecond) +) + +// TODO: replace with time.UnixMillis after dropping version <1.17 from CI workflows +func unixMS(ms int64) time.Time { + return time.Unix(ms/msPerS, (ms%msPerS)*nsPerMS) +} + +// startRuntimeAPILoop will return an error if handling a particular invoke resulted in a non-recoverable error +func startRuntimeAPILoop(api string, handler Handler) error { + client := newRuntimeAPIClient(api) + h := newHandler(handler) + for { + invoke, err := client.next() + if err != nil { + return err + } + if err = handleInvoke(invoke, h); err != nil { + return err + } + } +} + +// handleInvoke returns an error if the function panics, or some other non-recoverable error occurred +func handleInvoke(invoke *invoke, handler *handlerOptions) error { + // set the deadline + deadline, err := parseDeadline(invoke) + if err != nil { + return reportFailure(invoke, lambdaErrorResponse(err)) + } + ctx, cancel := context.WithDeadline(handler.baseContext, deadline) + defer cancel() + + // set the invoke metadata values + lc := lambdacontext.LambdaContext{ + AwsRequestID: invoke.id, + InvokedFunctionArn: invoke.headers.Get(headerInvokedFunctionARN), + } + if err := parseClientContext(invoke, &lc.ClientContext); err != nil { + return reportFailure(invoke, lambdaErrorResponse(err)) + } + if err := parseCognitoIdentity(invoke, &lc.Identity); err != nil { + return reportFailure(invoke, lambdaErrorResponse(err)) + } + ctx = lambdacontext.NewContext(ctx, &lc) + + // set the trace id + traceID := invoke.headers.Get(headerTraceID) + os.Setenv("_X_AMZN_TRACE_ID", traceID) + // nolint:staticcheck + ctx = context.WithValue(ctx, "x-amzn-trace-id", traceID) + + // call the handler, marshal any returned error + response, invokeErr := callBytesHandlerFunc(ctx, invoke.payload, handler.handlerFunc) + if invokeErr != nil { + if err := reportFailure(invoke, invokeErr); err != nil { + return err + } + if invokeErr.ShouldExit { + return fmt.Errorf("calling the handler function resulted in a panic, the process should exit") + } + return nil + } + // if the response needs to be closed (ex: net.Conn, os.File), ensure it's closed before the next invoke to prevent a resource leak + if response, ok := response.(io.Closer); ok { + defer response.Close() + } + + // if the response defines a content-type, plumb it through + contentType := contentTypeBytes + type ContentType interface{ ContentType() string } + if response, ok := response.(ContentType); ok { + contentType = response.ContentType() + } + + if err := invoke.success(response, contentType); err != nil { + return fmt.Errorf("unexpected error occurred when sending the function functionResponse to the API: %v", err) + } + + return nil +} + +func reportFailure(invoke *invoke, invokeErr *messages.InvokeResponse_Error) error { + errorPayload := safeMarshal(invokeErr) + log.Printf("%s", errorPayload) + + causeForXRay, err := json.Marshal(makeXRayError(invokeErr)) + if err != nil { + return fmt.Errorf("unexpected error occured when serializing the function error cause for X-Ray: %v", err) + } + + if err := invoke.failure(bytes.NewReader(errorPayload), contentTypeJSON, causeForXRay); err != nil { + return fmt.Errorf("unexpected error occurred 
when sending the function error to the API: %v", err) + } + return nil +} + +func callBytesHandlerFunc(ctx context.Context, payload []byte, handler handlerFunc) (response io.Reader, invokeErr *messages.InvokeResponse_Error) { + defer func() { + if err := recover(); err != nil { + invokeErr = lambdaPanicResponse(err) + } + }() + response, err := handler(ctx, payload) + if err != nil { + return nil, lambdaErrorResponse(err) + } + return response, nil +} + +func parseDeadline(invoke *invoke) (time.Time, error) { + deadlineEpochMS, err := strconv.ParseInt(invoke.headers.Get(headerDeadlineMS), 10, 64) + if err != nil { + return time.Time{}, fmt.Errorf("failed to parse deadline: %v", err) + } + return unixMS(deadlineEpochMS), nil +} + +func parseCognitoIdentity(invoke *invoke, out *lambdacontext.CognitoIdentity) error { + cognitoIdentityJSON := invoke.headers.Get(headerCognitoIdentity) + if cognitoIdentityJSON != "" { + if err := json.Unmarshal([]byte(cognitoIdentityJSON), out); err != nil { + return fmt.Errorf("failed to unmarshal cognito identity json: %v", err) + } + } + return nil +} + +func parseClientContext(invoke *invoke, out *lambdacontext.ClientContext) error { + clientContextJSON := invoke.headers.Get(headerClientContext) + if clientContextJSON != "" { + if err := json.Unmarshal([]byte(clientContextJSON), out); err != nil { + return fmt.Errorf("failed to unmarshal client context json: %v", err) + } + } + return nil +} + +func safeMarshal(v interface{}) []byte { + payload, err := json.Marshal(v) + if err != nil { + v := &messages.InvokeResponse_Error{ + Type: "Runtime.SerializationError", + Message: err.Error(), + } + payload, err := json.Marshal(v) + if err != nil { + panic(err) // never reach + } + return payload + } + return payload +} + +type xrayException struct { + Type string `json:"type"` + Message string `json:"message"` + Stack []*messages.InvokeResponse_Error_StackFrame `json:"stack"` +} + +type xrayError struct { + WorkingDirectory string `json:"working_directory"` + Exceptions []xrayException `json:"exceptions"` + Paths []string `json:"paths"` +} + +func makeXRayError(invokeResponseError *messages.InvokeResponse_Error) *xrayError { + paths := make([]string, 0, len(invokeResponseError.StackTrace)) + visitedPaths := make(map[string]struct{}, len(invokeResponseError.StackTrace)) + for _, frame := range invokeResponseError.StackTrace { + if _, exists := visitedPaths[frame.Path]; !exists { + visitedPaths[frame.Path] = struct{}{} + paths = append(paths, frame.Path) + } + } + + cwd, _ := os.Getwd() + exceptions := []xrayException{{ + Type: invokeResponseError.Type, + Message: invokeResponseError.Message, + Stack: invokeResponseError.StackTrace, + }} + if exceptions[0].Stack == nil { + exceptions[0].Stack = []*messages.InvokeResponse_Error_StackFrame{} + } + return &xrayError{ + WorkingDirectory: cwd, + Paths: paths, + Exceptions: exceptions, + } +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/messages/README.md b/vendor/github.com/aws/aws-lambda-go/lambda/messages/README.md index 8bf6ef4..04eddfa 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambda/messages/README.md +++ b/vendor/github.com/aws/aws-lambda-go/lambda/messages/README.md @@ -1,3 +1,3 @@ # Overview -[![GoDoc](https://godoc.org/github.com/aws/aws-lambda-go/lambda/messages?status.svg)](https://godoc.org/github.com/aws/aws-lambda-go/lambda/messages) +[![Go Reference](https://pkg.go.dev/badge/github.com/aws/aws-lambda-go/lambda/messages.svg)](https://pkg.go.dev/github.com/aws/aws-lambda-go/lambda/messages) 
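Note on the new generics-based entry point: the entry_generic.go file added above introduces StartHandlerFunc, which lets a handler's signature be checked at compile time instead of being validated by reflection when the runtime starts. A minimal usage sketch follows; the Greeting event type, its field, and the handle function name are invented purely for illustration and are not part of this change set.

    package main

    import (
        "context"

        "github.com/aws/aws-lambda-go/lambda"
    )

    // Greeting is a hypothetical input event used only for this example.
    type Greeting struct {
        Name string `json:"name"`
    }

    func handle(ctx context.Context, g Greeting) (string, error) {
        return "hello " + g.Name, nil
    }

    func main() {
        // Requires Go 1.18+ (see the go1.18 build tag on entry_generic.go).
        // A handler with a mismatched signature fails to compile instead of
        // surfacing a runtime validation error from the reflection path.
        lambda.StartHandlerFunc(handle)
    }

Functional options such as lambda.WithContext or lambda.WithEnableSIGTERM can still be passed as trailing arguments, since StartHandlerFunc feeds the same newHandler path used by StartWithOptions.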
diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/messages/messages.go b/vendor/github.com/aws/aws-lambda-go/lambda/messages/messages.go index d2ac65b..ea63001 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambda/messages/messages.go +++ b/vendor/github.com/aws/aws-lambda-go/lambda/messages/messages.go @@ -2,25 +2,29 @@ package messages +import "fmt" + type PingRequest struct { } type PingResponse struct { } +//nolint:stylecheck type InvokeRequest_Timestamp struct { Seconds int64 Nanos int64 } +//nolint:stylecheck type InvokeRequest struct { Payload []byte - RequestId string + RequestId string //nolint:stylecheck XAmznTraceId string Deadline InvokeRequest_Timestamp InvokedFunctionArn string - CognitoIdentityId string - CognitoIdentityPoolId string + CognitoIdentityId string //nolint:stylecheck + CognitoIdentityPoolId string //nolint:stylecheck ClientContext []byte } @@ -29,13 +33,19 @@ type InvokeResponse struct { Error *InvokeResponse_Error } +//nolint:stylecheck type InvokeResponse_Error struct { - Message string - Type string - StackTrace []*InvokeResponse_Error_StackFrame - ShouldExit bool + Message string `json:"errorMessage"` + Type string `json:"errorType"` + StackTrace []*InvokeResponse_Error_StackFrame `json:"stackTrace,omitempty"` + ShouldExit bool `json:"-"` +} + +func (e InvokeResponse_Error) Error() string { + return fmt.Sprintf("%#v", e) } +//nolint:stylecheck type InvokeResponse_Error_StackFrame struct { Path string `json:"path"` Line int32 `json:"line"` diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/rpc_function.go b/vendor/github.com/aws/aws-lambda-go/lambda/rpc_function.go new file mode 100644 index 0000000..0c8e798 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/rpc_function.go @@ -0,0 +1,114 @@ +// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +//go:build !lambda.norpc +// +build !lambda.norpc + +package lambda + +import ( + "context" + "encoding/json" + "errors" + "log" + "net" + "net/rpc" + "os" + "time" + + "github.com/aws/aws-lambda-go/lambda/messages" + "github.com/aws/aws-lambda-go/lambdacontext" +) + +func init() { + // Register `startFunctionRPC` to be run if the _LAMBDA_SERVER_PORT environment variable is set. + // This happens when the runtime for the function is configured as `go1.x`. + // The value of the environment variable will be passed as the first argument to `startFunctionRPC`. + // This allows users to save a little bit of coldstart time in the download, by the dependencies brought in for RPC support. + // The tradeoff is dropping compatibility with the RPC mode of the go1.x runtime. + // To drop the rpc dependencies, compile with `-tags lambda.norpc` + startFunctions = append([]*startFunction{{ + env: "_LAMBDA_SERVER_PORT", + f: startFunctionRPC, + }}, startFunctions...) 
+} + +func startFunctionRPC(port string, handler Handler) error { + lis, err := net.Listen("tcp", "localhost:"+port) + if err != nil { + log.Fatal(err) + } + err = rpc.Register(NewFunction(handler)) + if err != nil { + log.Fatal("failed to register handler function") + } + rpc.Accept(lis) + return errors.New("accept should not have returned") +} + +// Function struct which wrap the Handler +// +// Deprecated: The Function type is public for the go1.x runtime internal use of the net/rpc package +type Function struct { + handler *handlerOptions +} + +// NewFunction which creates a Function with a given Handler +// +// Deprecated: The Function type is public for the go1.x runtime internal use of the net/rpc package +func NewFunction(handler Handler) *Function { + return &Function{newHandler(handler)} +} + +// Ping method which given a PingRequest and a PingResponse parses the PingResponse +func (fn *Function) Ping(req *messages.PingRequest, response *messages.PingResponse) error { + *response = messages.PingResponse{} + return nil +} + +// Invoke method try to perform a command given an InvokeRequest and an InvokeResponse +func (fn *Function) Invoke(req *messages.InvokeRequest, response *messages.InvokeResponse) error { + defer func() { + if err := recover(); err != nil { + response.Error = lambdaPanicResponse(err) + } + }() + + deadline := time.Unix(req.Deadline.Seconds, req.Deadline.Nanos).UTC() + invokeContext, cancel := context.WithDeadline(fn.baseContext(), deadline) + defer cancel() + + lc := &lambdacontext.LambdaContext{ + AwsRequestID: req.RequestId, + InvokedFunctionArn: req.InvokedFunctionArn, + Identity: lambdacontext.CognitoIdentity{ + CognitoIdentityID: req.CognitoIdentityId, + CognitoIdentityPoolID: req.CognitoIdentityPoolId, + }, + } + if len(req.ClientContext) > 0 { + if err := json.Unmarshal(req.ClientContext, &lc.ClientContext); err != nil { + response.Error = lambdaErrorResponse(err) + return nil + } + } + invokeContext = lambdacontext.NewContext(invokeContext, lc) + + // nolint:staticcheck + invokeContext = context.WithValue(invokeContext, "x-amzn-trace-id", req.XAmznTraceId) + os.Setenv("_X_AMZN_TRACE_ID", req.XAmznTraceId) + + payload, err := fn.handler.Invoke(invokeContext, req.Payload) + if err != nil { + response.Error = lambdaErrorResponse(err) + return nil + } + response.Payload = payload + return nil +} + +func (fn *Function) baseContext() context.Context { + if fn.handler.baseContext != nil { + return fn.handler.baseContext + } + return context.Background() +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/runtime_api_client.go b/vendor/github.com/aws/aws-lambda-go/lambda/runtime_api_client.go new file mode 100644 index 0000000..158bd6b --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/runtime_api_client.go @@ -0,0 +1,173 @@ +// Copyright 2020 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +// +// Runtime API documentation: https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html + +package lambda + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" //nolint: staticcheck + "log" + "net/http" + "runtime" +) + +const ( + headerAWSRequestID = "Lambda-Runtime-Aws-Request-Id" + headerDeadlineMS = "Lambda-Runtime-Deadline-Ms" + headerTraceID = "Lambda-Runtime-Trace-Id" + headerCognitoIdentity = "Lambda-Runtime-Cognito-Identity" + headerClientContext = "Lambda-Runtime-Client-Context" + headerInvokedFunctionARN = "Lambda-Runtime-Invoked-Function-Arn" + headerXRayErrorCause = "Lambda-Runtime-Function-Xray-Error-Cause" + trailerLambdaErrorType = "Lambda-Runtime-Function-Error-Type" + trailerLambdaErrorBody = "Lambda-Runtime-Function-Error-Body" + contentTypeJSON = "application/json" + contentTypeBytes = "application/octet-stream" + apiVersion = "2018-06-01" + xrayErrorCauseMaxSize = 1024 * 1024 +) + +type runtimeAPIClient struct { + baseURL string + userAgent string + httpClient *http.Client + buffer *bytes.Buffer +} + +func newRuntimeAPIClient(address string) *runtimeAPIClient { + client := &http.Client{ + Timeout: 0, // connections to the runtime API are never expected to time out + } + endpoint := "http://" + address + "/" + apiVersion + "/runtime/invocation/" + userAgent := "aws-lambda-go/" + runtime.Version() + return &runtimeAPIClient{endpoint, userAgent, client, bytes.NewBuffer(nil)} +} + +type invoke struct { + id string + payload []byte + headers http.Header + client *runtimeAPIClient +} + +// success sends the response payload for an in-progress invocation. +// Notes: +// - An invoke is not complete until next() is called again! +func (i *invoke) success(body io.Reader, contentType string) error { + url := i.client.baseURL + i.id + "/response" + return i.client.post(url, body, contentType, nil) +} + +// failure sends the payload to the Runtime API. This marks the function's invoke as a failure. +// Notes: +// - The execution of the function process continues, and is billed, until next() is called again! +// - A Lambda Function continues to be re-used for future invokes even after a failure. +// If the error is fatal (panic, unrecoverable state), exit the process immediately after calling failure() +func (i *invoke) failure(body io.Reader, contentType string, causeForXRay []byte) error { + url := i.client.baseURL + i.id + "/error" + return i.client.post(url, body, contentType, causeForXRay) +} + +// next connects to the Runtime API and waits for a new invoke Request to be available. +// Note: After a call to Done() or Error() has been made, a call to next() will complete the in-flight invoke. 
+func (c *runtimeAPIClient) next() (*invoke, error) { + url := c.baseURL + "next" + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to construct GET request to %s: %v", url, err) + } + req.Header.Set("User-Agent", c.userAgent) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get the next invoke: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + log.Printf("runtime API client failed to close %s response body: %v", url, err) + } + }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to GET %s: got unexpected status code: %d", url, resp.StatusCode) + } + + c.buffer.Reset() + _, err = c.buffer.ReadFrom(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the invoke payload: %v", err) + } + + return &invoke{ + id: resp.Header.Get(headerAWSRequestID), + payload: c.buffer.Bytes(), + headers: resp.Header, + client: c, + }, nil +} + +func (c *runtimeAPIClient) post(url string, body io.Reader, contentType string, xrayErrorCause []byte) error { + b := newErrorCapturingReader(body) + req, err := http.NewRequest(http.MethodPost, url, b) + if err != nil { + return fmt.Errorf("failed to construct POST request to %s: %v", url, err) + } + req.Trailer = b.Trailer + req.Header.Set("User-Agent", c.userAgent) + req.Header.Set("Content-Type", contentType) + + if xrayErrorCause != nil && len(xrayErrorCause) < xrayErrorCauseMaxSize { + req.Header.Set(headerXRayErrorCause, string(xrayErrorCause)) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("failed to POST to %s: %v", url, err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + log.Printf("runtime API client failed to close %s response body: %v", url, err) + } + }() + if resp.StatusCode != http.StatusAccepted { + return fmt.Errorf("failed to POST to %s: got unexpected status code: %d", url, resp.StatusCode) + } + + _, err = io.Copy(ioutil.Discard, resp.Body) + if err != nil { + return fmt.Errorf("something went wrong reading the POST response from %s: %v", url, err) + } + + return nil +} + +func newErrorCapturingReader(r io.Reader) *errorCapturingReader { + trailer := http.Header{ + trailerLambdaErrorType: nil, + trailerLambdaErrorBody: nil, + } + return &errorCapturingReader{r, trailer} +} + +type errorCapturingReader struct { + reader io.Reader + Trailer http.Header +} + +func (r *errorCapturingReader) Read(p []byte) (int, error) { + if r.reader == nil { + return 0, io.EOF + } + n, err := r.reader.Read(p) + if err != nil && err != io.EOF { + lambdaErr := lambdaErrorResponse(err) + r.Trailer.Set(trailerLambdaErrorType, lambdaErr.Type) + r.Trailer.Set(trailerLambdaErrorBody, base64.StdEncoding.EncodeToString(safeMarshal(lambdaErr))) + return 0, io.EOF + } + return n, err +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambda/sigterm.go b/vendor/github.com/aws/aws-lambda-go/lambda/sigterm.go new file mode 100644 index 0000000..b742e91 --- /dev/null +++ b/vendor/github.com/aws/aws-lambda-go/lambda/sigterm.go @@ -0,0 +1,53 @@ +// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +package lambda + +import ( + "log" + "os" + "os/signal" + "syscall" +) + +// enableSIGTERM configures an optional list of sigtermHandlers to run on process shutdown. +// This non-default behavior is enabled within Lambda using the extensions API. 
+func enableSIGTERM(sigtermHandlers []func()) { + // for fun, we'll also optionally register SIGTERM handlers + if len(sigtermHandlers) > 0 { + signaled := make(chan os.Signal, 1) + signal.Notify(signaled, syscall.SIGTERM) + go func() { + <-signaled + for _, f := range sigtermHandlers { + f() + } + }() + } + + // detect if we're actually running within Lambda + endpoint := os.Getenv("AWS_LAMBDA_RUNTIME_API") + if endpoint == "" { + log.Print("WARNING! AWS_LAMBDA_RUNTIME_API environment variable not found. Skipping attempt to register internal extension...") + return + } + + // Now to do the AWS Lambda specific stuff. + // The default Lambda behavior is for functions to get SIGKILL at the end of lifetime, or after a timeout. + // Any use of the Lambda extension register API enables SIGTERM to be sent to the function process before the SIGKILL. + // We'll register an extension that does not listen for any lifecycle events named "GoLangEnableSIGTERM". + // The API will respond with an ID we need to pass in future requests. + client := newExtensionAPIClient(endpoint) + id, err := client.register("GoLangEnableSIGTERM") + if err != nil { + log.Printf("WARNING! Failed to register internal extension! SIGTERM events may not be enabled! err: %v", err) + return + } + + // We didn't actually register for any events, but we need to call /next anyways to let the API know we're done initalizing. + // Because we didn't register for any events, /next will never return, so we'll do this in a go routine that is doomed to stay blocked. + go func() { + _, err := client.next(id) + log.Printf("WARNING! Reached expected unreachable code! Extension /next call expected to block forever! err: %v", err) + }() + +} diff --git a/vendor/github.com/aws/aws-lambda-go/lambdacontext/README.md b/vendor/github.com/aws/aws-lambda-go/lambdacontext/README.md index 44d648a..e8c2299 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambdacontext/README.md +++ b/vendor/github.com/aws/aws-lambda-go/lambdacontext/README.md @@ -1,3 +1,3 @@ # Overview -[![GoDoc](https://godoc.org/github.com/aws/aws-lambda-go/lambdacontext?status.svg)](https://godoc.org/github.com/aws/aws-lambda-go/lambdacontext) +[![Go Reference](https://pkg.go.dev/badge/github.com/aws/aws-lambda-go/lambdacontext.svg)](https://pkg.go.dev/github.com/aws/aws-lambda-go/lambdacontext) diff --git a/vendor/github.com/aws/aws-lambda-go/lambdacontext/context.go b/vendor/github.com/aws/aws-lambda-go/lambdacontext/context.go index 3593c44..bd2e166 100644 --- a/vendor/github.com/aws/aws-lambda-go/lambdacontext/context.go +++ b/vendor/github.com/aws/aws-lambda-go/lambdacontext/context.go @@ -2,7 +2,7 @@ // // Helpers for accessing context information from an Invoke request. Context information // is stored in a https://golang.org/pkg/context/#Context. The functions FromContext and NewContext -// are used to retrieving and inserting an isntance of LambdaContext. +// are used to retrieving and inserting an instance of LambdaContext. package lambdacontext @@ -62,8 +62,8 @@ type CognitoIdentity struct { // LambdaContext is the set of metadata that is passed for every Invoke. 
type LambdaContext struct { - AwsRequestID string - InvokedFunctionArn string + AwsRequestID string //nolint: stylecheck + InvokedFunctionArn string //nolint: stylecheck Identity CognitoIdentity ClientContext ClientContext } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index 710eb43..11d4240 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -50,9 +50,19 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { for i, n := range names { val := v.FieldByName(n) + ft, ok := v.Type().FieldByName(n) + if !ok { + panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type())) + } + buf.WriteString(strings.Repeat(" ", indent+2)) buf.WriteString(n + ": ") - prettify(val, indent+2, buf) + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + prettify(val, indent+2, buf) + } if i < len(names)-1 { buf.WriteString(",\n") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index 645df24..3f7cffd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -8,6 +8,8 @@ import ( ) // StringValue returns the string representation of a value. +// +// Deprecated: Use Prettify instead. func StringValue(i interface{}) string { var buf bytes.Buffer stringValue(reflect.ValueOf(i), 0, &buf) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 74f35cc..b147f10 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -10,12 +10,13 @@ import ( // A Config provides configuration to a service client instance. type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string + Config *aws.Config + Handlers request.Handlers + PartitionID string + Endpoint string + SigningRegion string + SigningName string + ResolvedRegion string // States that the signing name did not come from a modeled source but // was derived based on other data. 
Used by service client constructors diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1d774cf..5ac5c24 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -53,7 +53,7 @@ var LogHTTPRequestHandler = request.NamedHandler{ } func logRequest(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } @@ -94,6 +94,10 @@ var LogHTTPRequestHeaderHandler = request.NamedHandler{ } func logRequestHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) if err != nil { r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, @@ -124,7 +128,7 @@ var LogHTTPResponseHandler = request.NamedHandler{ } func logResponse(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } @@ -186,7 +190,7 @@ var LogHTTPResponseHeaderHandler = request.NamedHandler{ } func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 0c48f72..a7530eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -2,13 +2,14 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string + ResolvedRegion string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 39fa6d5..4818ea4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -170,6 +170,9 @@ type Config struct { // // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case // Metadata member's map keys. The value of the header in the map is unaffected. + // + // The AWS SDK for Go v2, uses lower case header maps by default. The v1 + // SDK provides this opt-in for this option, for backwards compatibility. LowerCaseHeaderMaps *bool // Set this to `true` to disable the EC2Metadata client from overriding the @@ -208,8 +211,19 @@ type Config struct { // svc := s3.New(sess, &aws.Config{ // UseDualStack: aws.Bool(true), // }) + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. + // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence then this option. UseDualStack *bool + // Sets the resolver to resolve a dual-stack endpoint for the service. 
+ UseDualStackEndpoint endpoints.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint endpoints.FIPSEndpointState + // SleepDelay is an override for the func the SDK will call when sleeping // during the lifecycle of a request. Specifically this will be used for // request delays. This value should only be used for testing. To adjust @@ -554,6 +568,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.UseDualStack = other.UseDualStack } + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + if other.EC2MetadataDisableTimeoutOverride != nil { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } @@ -589,6 +607,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.LowerCaseHeaderMaps != nil { dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + + if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset { + dst.UseFIPSEndpoint = other.UseFIPSEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index cefe2a7..19ad619 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -28,7 +28,7 @@ const ( // compare test values. var now = time.Now -// TokenFetcher shuold return WebIdentity token bytes or an error +// TokenFetcher should return WebIdentity token bytes or an error type TokenFetcher interface { FetchToken(credentials.Context) ([]byte, error) } @@ -50,6 +50,8 @@ func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { // an OIDC token. type WebIdentityRoleProvider struct { credentials.Expiry + + // The policy ARNs to use with the web identity assumed role. PolicyArns []*sts.PolicyDescriptorType // Duration the STS credentials will be valid for. Truncated to seconds. @@ -74,6 +76,9 @@ type WebIdentityRoleProvider struct { // NewWebIdentityCredentials will return a new set of credentials with a given // configuration, role arn, and token file path. +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options, and wrap with credentials.NewCredentials helper. func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { svc := sts.New(c) p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) @@ -82,19 +87,42 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options. 
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path)) } // NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI and a TokenFetcher +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options. func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher) +} + +// NewWebIdentityRoleProviderWithOptions will return an initialize +// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a +// TokenFetcher. Additional options can be provided as functional options. +// +// TokenFetcher is the implementation that will retrieve the JWT token from to +// assume the role with. Use the provided FetchTokenPath implementation to +// retrieve the JWT token using a file system path. +func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider { + p := WebIdentityRoleProvider{ client: svc, tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } + + for _, fn := range optFns { + fn(&p) + } + + return &p } // Retrieve will attempt to assume a role from a token which is located at @@ -104,9 +132,9 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { return p.RetrieveWithContext(aws.BackgroundContext()) } -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. +// RetrieveWithContext will attempt to assume a role from a token which is +// located at 'WebIdentityTokenFilePath' specified destination and if that is +// empty an error will be returned. func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index b98ea86..8d65ca1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -81,7 +81,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol // Customization for i := 0; i < len(ps); i++ { p := &ps[i] - custAddS3DualStack(p) custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) @@ -91,15 +90,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol return ps, nil } -func custAddS3DualStack(p *partition) { - if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - func custRegionalS3(p *partition) { if p.ID != "aws" { return @@ -110,35 +100,28 @@ func custRegionalS3(p *partition) { return } + const awsGlobal = "aws-global" + const usEast1 = "us-east-1" + // If global endpoint already exists no customization needed. 
- if _, ok := service.Endpoints["aws-global"]; ok { + if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { return } - service.PartitionEndpoint = "aws-global" - service.Endpoints["us-east-1"] = endpoint{} - service.Endpoints["aws-global"] = endpoint{ + service.PartitionEndpoint = awsGlobal + if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { + service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} + } + service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ Hostname: "s3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: usEast1, }, } p.Services["s3"] = service } -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } @@ -155,12 +138,13 @@ func custFixAppAutoscalingChina(p *partition) { } const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { + serviceDefault := s.Defaults[defaultKey{}] + if e, a := expectHostname, serviceDefault.Hostname; e != a { fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) return } - - s.Defaults.Hostname = expectHostname + ".cn" + serviceDefault.Hostname = expectHostname + ".cn" + s.Defaults[defaultKey{}] = serviceDefault p.Services[serviceName] = s } @@ -175,18 +159,25 @@ func custFixAppAutoscalingUsGov(p *partition) { return } - if a := s.Defaults.CredentialScope.Service; a != "" { + serviceDefault := s.Defaults[defaultKey{}] + if a := serviceDefault.CredentialScope.Service; a != "" { fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) return } - if a := s.Defaults.Hostname; a != "" { + if a := serviceDefault.Hostname; a != "" { fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) return } - s.Defaults.CredentialScope.Service = "application-autoscaling" - s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + serviceDefault.CredentialScope.Service = "application-autoscaling" + serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" + + if s.Defaults == nil { + s.Defaults = make(endpointDefaults) + } + + s.Defaults[defaultKey{}] = serviceDefault p.Services[serviceName] = s } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 4538d2f..ae816bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -25,6 +25,7 @@ const ( ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). @@ -55,6 +56,7 @@ const ( // AWS ISO (US) partition's regions. const ( UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. + UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. ) // AWS ISOB (US) partition's regions. 
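The hunks that follow rework the generated endpoint tables so each region maps to endpointKey values that may carry a dualStackVariant or fipsVariant, which is what the new aws.Config fields UseDualStackEndpoint and UseFIPSEndpoint (added in aws/config.go above) select at resolve time. A brief sketch of opting into those variants with the v1 SDK; the region value and the newDualStackFIPSSession helper name are assumptions chosen only for this example.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func newDualStackFIPSSession() (*session.Session, error) {
        // Enabling these states asks the resolver for the dualStackVariant /
        // fipsVariant endpoint keys defined in the tables below.
        return session.NewSession(&aws.Config{
            Region:               aws.String("us-west-2"),
            UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
            UseFIPSEndpoint:      endpoints.FIPSEndpointStateEnabled,
        })
    }

Whether a given service and region actually exposes a FIPS or dual-stack variant is determined by the endpointKey entries in the generated defaults.go data that this diff updates.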
@@ -104,10 +106,36 @@ var awsPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "af-south-1": region{ @@ -134,6 +162,9 @@ var awsPartition = partition{ "ap-southeast-2": region{ Description: "Asia Pacific (Sydney)", }, + "ap-southeast-3": region{ + Description: "Asia Pacific (Jakarta)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, @@ -176,965 +207,3217 @@ var awsPartition = partition{ }, Services: services{ "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "access-analyzer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "account": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "account.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "acm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ Hostname: "acm-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "acm-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "acm-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "acm-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "acm-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "acm-pca-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "acm-pca-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "acm-pca-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "acm-pca-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "airflow": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "amplify": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "amplifybackend": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifyuibuilder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "api.detective-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api.detective-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "api.detective-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "api.ecr": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ Hostname: "api.ecr.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, }, - "ap-east-1": endpoint{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, }, - "ap-northeast-1": endpoint{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ Hostname: "api.ecr.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "ap-northeast-2": endpoint{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ Hostname: "api.ecr.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-northeast-3": endpoint{ + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ Hostname: "api.ecr.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, }, - "ap-south-1": endpoint{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{ Hostname: "api.ecr.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, - "ap-southeast-1": endpoint{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ Hostname: "api.ecr.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "ap-southeast-2": endpoint{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ Hostname: "api.ecr.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "ca-central-1": endpoint{ + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ Hostname: "api.ecr.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "eu-central-1": endpoint{ + endpointKey{ + Region: "dkr-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"dkr-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ Hostname: "api.ecr.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "eu-north-1": endpoint{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{ Hostname: "api.ecr.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, }, - "eu-south-1": endpoint{ + endpointKey{ + Region: "eu-south-1", + }: endpoint{ Hostname: "api.ecr.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, }, - "eu-west-1": endpoint{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "eu-west-2": endpoint{ + endpointKey{ + Region: "eu-west-2", + }: endpoint{ Hostname: "api.ecr.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "eu-west-3": endpoint{ + endpointKey{ + Region: "eu-west-3", + }: endpoint{ Hostname: "api.ecr.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "fips-dkr-us-east-1": endpoint{ + endpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoint{ Hostname: "ecr-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-east-2": endpoint{ + endpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoint{ Hostname: "ecr-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-west-1": endpoint{ + endpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoint{ Hostname: "ecr-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-west-2": endpoint{ + endpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoint{ Hostname: "ecr-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ecr-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ecr-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ecr-fips.us-west-1.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "ecr-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{ + endpointKey{ + Region: "me-south-1", + }: endpoint{ Hostname: "api.ecr.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, }, - "sa-east-1": endpoint{ + endpointKey{ + Region: "sa-east-1", + }: endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, }, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ Hostname: "api.ecr.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ Hostname: "api.ecr.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{ + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ Hostname: "api.ecr.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "us-west-2": endpoint{ + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ Hostname: "api.ecr.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "api.elastic-inference": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", }, - "ap-northeast-2": endpoint{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", }, - "eu-west-1": endpoint{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{ Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", }, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ Hostname: "api.elastic-inference.us-east-1.amazonaws.com", }, - "us-east-2": endpoint{ + endpointKey{ + Region: "us-east-2", + }: endpoint{ Hostname: "api.elastic-inference.us-east-2.amazonaws.com", }, - "us-west-2": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{ Hostname: "api.elastic-inference.us-west-2.amazonaws.com", }, }, }, "api.fleethub.iot": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + 
Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, }, }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "api.sagemaker": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, 
+ endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", }, }, }, "apigateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "app-integrations": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "appflow": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": 
endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "appmesh": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"appmesh.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, }, }, "apprunner": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + }, }, }, "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "appstream2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "appsync": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ 
+ Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aps": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "athena": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "athena-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "athena-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "athena-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "athena-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "auditmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: 
[]string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "backup": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "batch": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "fips.batch.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": 
endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "fips.batch.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "fips.batch.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "fips.batch.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + }, + }, + }, + "billingconductor": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "billingconductor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "braket": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "budgets": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ Hostname: "budgets.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", @@ -1142,12 +3425,20 @@ var awsPartition = partition{ }, }, }, + "catalog.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "ce": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ Hostname: "ce.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", @@ -1158,11 +3449,15 @@ var awsPartition = partition{ "chime": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "aws-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ Hostname: "chime.us-east-1.amazonaws.com", Protocols: 
[]string{"https"}, CredentialScope: credentialScope{ @@ -1172,99 +3467,385 @@ var awsPartition = partition{ }, }, "cloud9": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ 
+ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", + }, }, }, "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "cloudformation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "cloudformation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "cloudformation-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "cloudformation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "cloudfront": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ Hostname: "cloudfront.amazonaws.com", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ @@ -1274,915 +3855,2914 @@ var awsPartition = partition{ }, }, "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": 
endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "codeartifact": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "codebuild": service{ - - 
Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "codebuild-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "codebuild-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "codebuild-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "codebuild-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "codecommit": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": 
endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ Hostname: "codecommit-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - 
"us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "codeguru-reviewer": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "codepipeline": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "codepipeline-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "codepipeline-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "codepipeline-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "codepipeline-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "codestar-connections": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - 
"ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: 
[]string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "comprehend-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "comprehend-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "comprehend-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, "comprehendmedical": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, 
+ }: endpoint{ Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + }, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "compute-optimizer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "compute-optimizer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, "config": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": 
endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "config-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "config-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "config-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "config-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "connect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "connect-campaigns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "contact-lens": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dataexchange": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "datasync": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: 
boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "data.mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, 
+ endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dataexchange": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + 
}, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "directconnect-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "directconnect-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "directconnect-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "directconnect-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "discovery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "dms-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "dms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "docdb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "ap-northeast-2": endpoint{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ Hostname: "rds.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-south-1": endpoint{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{ Hostname: "rds.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, - "ap-southeast-1": endpoint{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ Hostname: "rds.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "ap-southeast-2": endpoint{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ Hostname: 
"rds.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "ca-central-1": endpoint{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{ Hostname: "rds.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "eu-central-1": endpoint{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "eu-west-1": endpoint{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{ Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "eu-west-2": endpoint{ + endpointKey{ + Region: "eu-west-2", + }: endpoint{ Hostname: "rds.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "eu-west-3": endpoint{ + endpointKey{ + Region: "eu-west-3", + }: endpoint{ Hostname: "rds.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "sa-east-1": endpoint{ + endpointKey{ + Region: "sa-east-1", + }: endpoint{ Hostname: "rds.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, }, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ + endpointKey{ + Region: "us-east-2", + }: endpoint{ Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-2": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{ Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", @@ -2190,5502 +6770,17596 @@ var awsPartition = partition{ }, }, }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "ds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - 
"fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "ds-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ds-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ds-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ds-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "ds-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": 
endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, - }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "dynamodb-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "dynamodb-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "dynamodb-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: 
"us-west-2-fips", + }: endpoint{ Hostname: "dynamodb-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "ebs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "ebs-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ebs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ebs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ebs-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "ebs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ec2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ec2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ec2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "ec2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ecs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ecs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ecs-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: 
"fips-us-west-2", + }: endpoint{ Hostname: "ecs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "fips.eks.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: 
boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "fips.eks.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "fips.eks.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "fips.eks.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ Hostname: "elasticache-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "elasticfilesystem": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-east-1": endpoint{ + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-1": endpoint{ + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-3": endpoint{ + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": endpoint{ + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: 
"ap-southeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-2": endpoint{ + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, }, - "fips-ca-central-1": endpoint{ + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-central-1": endpoint{ + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-north-1": endpoint{ + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "fips-eu-south-1": endpoint{ + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-1": endpoint{ + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "fips-eu-west-3": endpoint{ + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "fips-me-south-1": endpoint{ + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "fips-sa-east-1": endpoint{ + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + 
endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + }, + }, + }, "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: 
"elasticmapreduce-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", SSLCommonName: "{service}.{region}.{dnsSuffix}", }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + }, }, }, "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + }, }, }, "emr-containers": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "emr-containers-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "emr-containers-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: 
"emr-containers-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, }, }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "es": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "es-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "events-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "events-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "events-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "events-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + }, + }, + }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "evidently.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "evidently.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "finspace": service{ - - Endpoints: endpoints{ 
- "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "finspace-api": service{ - - Endpoints: endpoints{ - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "firehose": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "firehose-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "firehose-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "firehose-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "firehose-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"firehose-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + 
Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ Hostname: "fms-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-east-1": endpoint{ + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ Hostname: "fms-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-1": endpoint{ + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ Hostname: "fms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ Hostname: "fms-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": endpoint{ + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ Hostname: "fms-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ Hostname: "fms-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-2": endpoint{ + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ Hostname: "fms-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "fips-ca-central-1": endpoint{ + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-central-1": endpoint{ + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ Hostname: "fms-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-south-1": endpoint{ + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ Hostname: "fms-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-1": endpoint{ + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ Hostname: "fms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ Hostname: "fms-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "fips-eu-west-3": endpoint{ + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ Hostname: "fms-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "fips-me-south-1": endpoint{ + endpointKey{ + Region: 
"fips-me-south-1", + }: endpoint{ Hostname: "fms-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "fips-sa-east-1": endpoint{ + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ Hostname: "fms-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "fms-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "fms-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "fms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "fms-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "forecast": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "forecast-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: 
endpoint{ Hostname: "forecast-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "forecast-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, "forecastquery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "forecastquery-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "forecastquery-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "forecastquery-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "frauddetector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "fsx": service{ - - Endpoints: endpoints{ - 
"af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-prod-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-ca-central-1", + }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-prod-us-east-1": endpoint{ + endpointKey{ + Region: "fips-prod-us-east-1", + }: endpoint{ Hostname: "fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-prod-us-east-2": endpoint{ + endpointKey{ + Region: "fips-prod-us-east-2", + }: endpoint{ Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-prod-us-west-1": endpoint{ + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-prod-us-west-2": endpoint{ + endpointKey{ + Region: "fips-prod-us-west-2", + }: endpoint{ Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - 
"us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "prod-ca-central-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "prod-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "prod-us-east-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-2", + Region: 
"us-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "groundstation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "prod-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "prod-us-east-2", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "me-south-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "prod-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "prod-us-west-2", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-west-2", }, + 
Deprecated: boxedTrue, }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "fips-us-east-2": endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "prod-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", }, }, }, - "healthlake": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "gamesparks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "honeycode": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "iam-fips": endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, }, }, - }, - "identity-chime": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-east-1", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", }, - }, - }, - "identitystore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + 
Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotevents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + "grafana": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "grafana.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "ap-northeast-2": endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "grafana.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-southeast-1": endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "grafana.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "ap-southeast-2": endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "grafana.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "eu-central-1": endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "grafana.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "eu-west-1": endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "grafana.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "eu-west-2": endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: 
"grafana.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "us-east-1": endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "grafana.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "grafana.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-2": endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "grafana.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, }, }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "iotwireless": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + "groundstation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"guardduty-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "kafka": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "healthlake": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "kms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "honeycode": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "lakeformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "iam.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "iam", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - 
"sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "iam", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "iam-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", + }, + }, + "identity-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", + Service: "IngestionService", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "license-manager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - 
"ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + 
}: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "logs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", + "iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-1", + Service: "execute-api", }, }, - "fips-us-east-2": endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Service: "execute-api", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Service: "execute-api", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Service: "execute-api", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, 
+ }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "execute-api", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lookoutequipment": service{ - - Endpoints: endpoints{ - "ap-northeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, }, }, - "lookoutvision": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "macie": service{ - - Endpoints: endpoints{ - "fips-us-east-1": endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: 
"data.iotevents.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-northeast-2", }, }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "macie2": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-south-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-southeast-2", }, }, - "fips-us-west-2": endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "managedblockchain": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": 
endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "iotthingsgraph": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "messaging-chime": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + "iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": 
endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", + "ivs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafkaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mq": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", + "kinesis": service{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "rds.ap-east-1.amazonaws.com", + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "af-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "ap-southeast-3", }, + Deprecated: boxedTrue, }, - "eu-north-1": endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: 
"rds.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "oidc": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", }, - "ap-southeast-2": endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "eu-north-1": endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", }, - "eu-west-1": endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-west-3": endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", }, }, }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "lookoutequipment": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "lookoutmetrics": service{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "lookoutvision": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", + "machinelearning": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "macie": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-aws-global": endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", }, }, }, - "outposts": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - 
"eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", + "macie2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "personalize": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + "marketplacecommerceanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + }, + }, + "mediaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "portal.sso": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: 
"portal.sso.ap-southeast-1.amazonaws.com", + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", }, - "eu-central-1": endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-west-2", }, - }, - "eu-west-2": endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", + Deprecated: boxedTrue, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-west-1", }, }, - "us-east-1": endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + 
"messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mobileanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "profile": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": 
endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", }, - "fips-us-east-1": endpoint{ - Hostname: 
"ram-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - 
"us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "rds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "rds-fips.ca-central-1": endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", + "mturk-requester": service{ + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "sandbox", + }: endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-east-1", }, }, - "rds-fips.us-east-1": endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "rds-fips.us-east-2": endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-northeast-2", }, }, - "rds-fips.us-west-1": endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-south-1", }, }, - "rds-fips.us-west-2": endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: 
"rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-central-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-north-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "rekognition-fips.ca-central-1": endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "eu-west-3", }, }, - "rekognition-fips.us-east-1": endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "rekognition-fips.us-east-2": endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "rekognition-fips.us-west-1": endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "rekognition-fips.us-west-2": endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "resource-groups": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": 
endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + 
PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "networkmanager.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "nimble": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-east-1", }, }, - "fips-aws-global": endpoint{ - Hostname: "route53-fips.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - }, - }, - "route53-recovery-control-config": service{ - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-northeast-2", }, }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: 
"us-east-1", + Region: "ap-northeast-3", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-south-1", }, }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-2", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-central-1", }, }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "accesspoint-af-south-1": endpoint{ - Hostname: "s3-accesspoint.af-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-east-1": endpoint{ - Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + 
endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "opsworks-cm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: 
[Remainder of the vendored AWS SDK diff elided for readability: the line structure of these hunks was lost in extraction. They belong to the generated endpoint table in github.com/aws/aws-sdk-go (aws/endpoints/defaults.go), pulled in indirectly, and add per-region `endpointKey`/`endpoint` entries, including FIPS and dual-stack variants and deprecated "*-fips" aliases with their credential scopes, for services such as polly, portal.sso, profile, projects.iot1click, proton, qldb, quicksight, ram, rbin, rds, redshift, redshift-serverless, rekognition, resiliencehub, resource-groups, robomaker, rolesanywhere, route53, route53-recovery-control-config, route53domains, route53resolver, rum, runtime-v2-lex, runtime.lex, runtime.sagemaker, s3, s3-control, s3-outposts, savingsplans, schemas, sdb, secretsmanager, and securityhub.]
"securityhub-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + }, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "servicediscovery", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + 
}: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "session.qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, - "accesspoint-ap-northeast-1": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "accesspoint-ap-northeast-2": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, - "accesspoint-ap-northeast-3": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", }, - "accesspoint-ap-south-1": endpoint{ - Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", }, - "accesspoint-ap-southeast-1": endpoint{ - Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", }, - "accesspoint-ap-southeast-2": endpoint{ - Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", }, - "accesspoint-ca-central-1": endpoint{ - Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", }, - "accesspoint-eu-central-1": endpoint{ - Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", }, - "accesspoint-eu-north-1": endpoint{ - Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", }, - "accesspoint-eu-south-1": endpoint{ - Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", }, - "accesspoint-eu-west-1": endpoint{ - Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-southeast-1", + 
}: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", }, - "accesspoint-eu-west-2": endpoint{ - Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", }, - "accesspoint-eu-west-3": endpoint{ - Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", }, - "accesspoint-me-south-1": endpoint{ - Hostname: "s3-accesspoint.me-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", }, - "accesspoint-sa-east-1": endpoint{ - Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", }, - "accesspoint-us-east-1": endpoint{ - Hostname: "s3-accesspoint.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", }, - "accesspoint-us-east-2": endpoint{ - Hostname: "s3-accesspoint.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", }, - "accesspoint-us-west-1": endpoint{ - Hostname: "s3-accesspoint.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, }, - "accesspoint-us-west-2": endpoint{ - Hostname: "s3-accesspoint.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, }, - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: 
"fips-ap-south-1", + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, }, - "aws-global": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-accesspoint-ca-central-1": endpoint{ - Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, - "fips-accesspoint-us-east-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "fips-accesspoint-us-east-2": endpoint{ - Hostname: 
"s3-accesspoint-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", }, - "fips-accesspoint-us-west-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", }, - "fips-accesspoint-us-west-2": endpoint{ - Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + 
Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", }, }, }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-northeast-3": endpoint{ - Hostname: "s3-control.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-incidents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-3", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, - }, - "ca-central-1-fips": endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, - }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "sts.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: 
"sts-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", }, - "us-west-2-fips": endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "savingsplans": service{ + "support": service{ PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "savingsplans.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "support.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, }, }, - "schemas": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, 
+ endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", }, }, }, - "securityhub": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-1": endpoint{ + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: 
endpoint{ Protocols: []string{"https"}, }, - "ap-northeast-2": endpoint{ + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", Protocols: []string{"https"}, }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", }, - "eu-west-3": endpoint{ - Protocols: 
[]string{"https"}, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-fips-ca-central-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-fips-us-east-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-fips-us-east-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-fips-us-west-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "transcribestreaming-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "transcribestreaming-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "transcribestreaming-us-east-2", + }: endpoint{ 
CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "transcribestreaming-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "transcribestreaming-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, - }, - }, - "servicecatalog-appregistry": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", 
CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "servicediscovery-fips": endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "servicequotas": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "shield": service{ + "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-aws-global": endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "snowball": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "fips-ap-northeast-3": endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, }, - "fips-ap-south-1": endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "fips-ap-southeast-2": endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "fips-ca-central-1": endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "fips-eu-central-1": endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", 
CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "fips-eu-west-1": endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "eu-central-1", }, }, - "fips-eu-west-2": endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "eu-north-1", }, }, - "fips-eu-west-3": endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "eu-north-1", }, }, - "fips-sa-east-1": endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "eu-south-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-south-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-west-2", }, }, - "fips-us-east-2": endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: 
"waf-regional.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-3", }, }, - "fips-us-west-1": endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-3", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-northeast-3", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, 
- "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-3", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: 
"eu-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", 
CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{ - Hostname: "sts.amazonaws.com", + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "me-south-1", }, }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "me-south-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "sa-east-1", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "sa-east-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, }, - "fips-us-east-2": endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-east-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "wafv2.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "af-south-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "af-south-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "wafv2.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - 
"transcribestreaming": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "wafv2.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-northeast-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "transfer-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "wafv2.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-northeast-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-northeast-2", }, }, - "fips-us-west-2": endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "wafv2.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-northeast-3", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-3", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "wafv2.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: 
"us-east-2", + Region: "ap-south-1", }, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-south-1", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-fips": endpoint{ - Hostname: "waf-fips.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "wafv2.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-1", }, }, - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-1", }, }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "wafv2.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "af-south-1", + Region: "ap-southeast-2", }, }, - "ap-east-1": endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-east-1", + Region: "ap-southeast-2", }, }, - "ap-northeast-1": endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "ap-southeast-3", }, }, - "ap-northeast-2": endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "ap-southeast-3", }, }, - "ap-northeast-3": endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "wafv2.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-3", + Region: "ca-central-1", }, }, - "ap-south-1": endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "ca-central-1", }, }, - "ap-southeast-1": endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "wafv2.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "eu-central-1", }, }, - "ap-southeast-2": endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "eu-central-1", }, }, - "ca-central-1": endpoint{ - Hostname: 
"waf-regional.ca-central-1.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "wafv2.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "eu-north-1", }, }, - "eu-central-1": endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "eu-north-1", }, }, - "eu-north-1": endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "wafv2.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "eu-south-1", }, }, - "eu-south-1": endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, }, - "eu-west-1": endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "wafv2.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "eu-west-2": endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "wafv2.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "eu-west-3": endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "wafv2.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "fips-af-south-1": endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-east-1": endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - 
"fips-ap-northeast-3": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "fips-ca-central-1": endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-central-1": endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-north-1": endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "fips-eu-south-1": endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-1": endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "fips-eu-west-3": endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "fips-me-south-1": endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + endpointKey{ + Region: 
"fips-me-south-1", + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "fips-sa-east-1": endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "wafv2.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "wafv2.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, }, - "fips-us-west-1": endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "wafv2.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - "me-south-1": endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "wafv2.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "me-south-1", + Region: "us-east-2", }, }, - "sa-east-1": endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "us-east-2", }, }, - "us-east-1": endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "wafv2.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, }, - "us-east-2": 
endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-1", }, }, - "us-west-1": endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "wafv2.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, }, - "us-west-2": endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "wisdom": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "workdocs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "workdocs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"workdocs-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "workspaces-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "workspaces-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "xray": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "xray-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "xray-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "xray-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "xray-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, }, @@ -7706,10 +24380,36 @@ var awscnPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: 
fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "cn-north-1": region{ @@ -7721,29 +24421,52 @@ var awscnPartition = partition{ }, Services: services{ "access-analyzer": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "account": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "account.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "acm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -7752,80 +24475,179 @@ var awscnPartition = partition{ }, }, "api.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + 
Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "appsync": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "athena": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "backup": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "batch": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "budgets": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "budgets.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -7836,9 +24658,10 @@ var awscnPartition = partition{ "ce": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "ce.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -7846,19 +24669,33 @@ var awscnPartition = partition{ }, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "cloudfront": service{ PartitionEndpoint: "aws-cn-global", 
IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ @@ -7868,77 +24705,182 @@ var awscnPartition = partition{ }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "codebuild": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "codecommit": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "cur": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "dax": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "docdb": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "rds.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -7947,187 +24889,338 @@ var awscnPartition = partition{ }, }, "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "ebs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "elasticfilesystem": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, + Deprecated: boxedTrue, }, }, }, "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "firehose": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, 
}, "fsx": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "glue": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "greengrass": service{ IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, }, }, "guardduty": service{ IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "health": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "iam.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", @@ -8136,32 +25229,41 @@ var awscnPartition = partition{ }, }, "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "iotanalytics": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, }, }, 
"iotevents": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, }, }, "ioteventsdata": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", @@ -8170,72 +25272,119 @@ var awscnPartition = partition{ }, }, "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, }, }, "kafka": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "kinesisanalytics": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "lakeformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: 
credentialScope{ Region: "cn-northwest-1", @@ -8243,32 +25392,54 @@ var awscnPartition = partition{ }, }, }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "mq": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "neptune": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Hostname: "rds.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "rds.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -8279,9 +25450,10 @@ var awscnPartition = partition{ "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -8290,51 +25462,76 @@ var awscnPartition = partition{ }, }, "personalize": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "ram": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "resource-groups": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - 
Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "route53.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -8343,198 +25540,326 @@ var awscnPartition = partition{ }, }, "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "accesspoint-cn-north-1": endpoint{ - Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, - "accesspoint-cn-northwest-1": endpoint{ - Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, }, }, "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Hostname: "s3-control.cn-north-1.amazonaws.com.cn", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "cn-north-1", }, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: 
"s3-control.cn-northwest-1.amazonaws.com.cn", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "cn-northwest-1", }, }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "secretsmanager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "securityhub": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Protocols: []string{"https"}, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Protocols: []string{"https"}, }, }, }, "servicecatalog": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "servicediscovery": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, + Deprecated: boxedTrue, }, }, }, "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "support": service{ PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ Hostname: "support.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", @@ -8543,31 +25868,53 @@ var awscnPartition = partition{ }, }, "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"cn-north-1", + }: endpoint{ Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", @@ -8576,59 +25923,152 @@ var awscnPartition = partition{ }, }, "transcribestreaming": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "transfer": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "waf-regional": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, - "cn-northwest-1": endpoint{ + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, }, - "fips-cn-north-1": endpoint{ + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: 
credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, }, }, }, "workspaces": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "xray": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, }, @@ -8649,10 +26089,36 @@ var awsusgovPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "us-gov-east-1": region{ @@ -8664,15 +26130,18 @@ var awsusgovPartition = partition{ }, Services: services{ "access-analyzer": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -8681,15 +26150,26 @@ var awsusgovPartition = partition{ }, }, "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "acm.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "acm.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -8698,232 +26178,675 @@ var awsusgovPartition = partition{ }, }, "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "acm-pca.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + 
endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "acm-pca.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "api.ecr": service{ - - Endpoints: endpoints{ - "fips-dkr-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoint{ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-dkr-us-gov-west-1", + }: endpoint{ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-east-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "api.ecr.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "api.ecr.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-fips-secondary", + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "us-gov-west-1-fips-secondary": endpoint{ + endpointKey{ + Region: "us-gov-west-1-secondary", + Variant: fipsVariant, + }: endpoint{ Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", }, }, }, "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + }, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, }, }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, }, }, - Endpoints: endpoints{ - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: 
"appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, }, }, "athena": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "athena-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "athena-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, }, }, "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, }, }, "backup": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "batch": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "batch.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"batch.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "batch.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "cloudformation.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "cloudformation.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -8932,206 +26855,516 @@ var awsusgovPartition = partition{ }, }, "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"cloudtrail.us-gov-west-1.amazonaws.com", }, }, }, "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "codecommit": service{ - - Endpoints: endpoints{ - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "codepipeline": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: 
"codepipeline-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, "cognito-identity": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "config": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Service: "iotdata", }, }, - "us-gov-west-1": endpoint{}, }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Service: "iotdata", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, - "config": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "connect": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - 
"fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "directconnect.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "directconnect.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9139,23 +27372,96 @@ var awsusgovPartition = partition{ }, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "dms.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "docdb": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9164,60 +27470,124 @@ var awsusgovPartition = partition{ }, }, "ds": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "ds-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "ds-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "dynamodb.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "ebs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "ec2.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "ec2.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9226,68 +27596,151 @@ var awsusgovPartition = partition{ }, }, "ecs": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "eks.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "eks.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9296,177 +27749,450 @@ var awsusgovPartition = partition{ }, }, "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: 
"elasticfilesystem-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, }, "elasticmapreduce": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, }, }, }, "email": service{ - - Endpoints: endpoints{ - 
"fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "email-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, "es": service{ - - Endpoints: endpoints{ - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "es-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "events.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "events.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", }, }, }, "firehose": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", 
}, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "fms-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "fms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "fsx": service{ - - Endpoints: endpoints{ - "fips-prod-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-prod-us-gov-west-1": endpoint{ + endpointKey{ + Region: "prod-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "glacier.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "glacier.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ @@ -9476,55 +28202,98 @@ var awsusgovPartition = partition{ }, }, "glue": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "glue-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "greengrass": service{ IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "dataplane-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dataplane-us-gov-east-1", + }: endpoint{ Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "dataplane-us-gov-west-1": endpoint{ + endpointKey{ + Region: "dataplane-us-gov-west-1", + }: endpoint{ Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "fips-us-gov-east-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "greengrass.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9534,122 +28303,415 @@ var awsusgovPartition = partition{ }, "guardduty": service{ IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "guardduty.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "guardduty.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "health": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "health-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", }, }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, 
- "iam-govcloud-fips": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", }, }, }, "inspector": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "iot-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Service: "execute-api", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "iot-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Service: "execute-api", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + }, }, }, "kafka": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + }, }, }, "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "kinesis.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "kinesis.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9658,96 +28720,229 @@ var awsusgovPartition = partition{ }, }, "kinesisanalytics": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-gov-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "kms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "lakeformation": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "logs.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "logs.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", }, }, }, "mediaconvert": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9755,81 +28950,201 @@ var awsusgovPartition = partition{ }, }, }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"models-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "monitoring.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "monitoring.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "mq": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "mq-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "mq-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "rds.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9837,35 +29152,125 @@ var awsusgovPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ Hostname: "organizations.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "fips-aws-us-gov-global": endpoint{ + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ Hostname: "organizations.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "outposts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "outposts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "outposts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9874,48 +29279,107 @@ var awsusgovPartition = partition{ }, }, "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, }, }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "pinpoint.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "polly": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "ram.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "ram.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9924,34 +29388,84 @@ var awsusgovPartition = partition{ }, }, "rds": service{ - - Endpoints: endpoints{ - "rds.us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "rds.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", }, - "rds.us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "redshift.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: 
"redshift.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -9960,220 +29474,552 @@ var awsusgovPartition = partition{ }, }, "rekognition": service{ - - Endpoints: endpoints{ - "rekognition-fips.us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rekognition-fips.us-gov-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, }, }, "resource-groups": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "resource-groups.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "resource-groups.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ Hostname: "route53.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "fips-aws-us-gov-global": endpoint{ + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ Hostname: "route53.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "route53resolver": 
service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "accesspoint-us-gov-east-1": endpoint{ - Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, }, - "accesspoint-us-gov-west-1": endpoint{ - Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, }, - "fips-accesspoint-us-gov-east-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, }, - "fips-accesspoint-us-gov-west-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: 
endpoint{ Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "s3.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, }, }, "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "s3-control.us-gov-east-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-east-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "s3-control.us-gov-west-1.amazonaws.com", 
SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "secretsmanager": service{ + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "securityhub": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"securityhub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, CredentialScope: credentialScope{ @@ -10183,130 +30029,332 @@ var awsusgovPartition = partition{ }, }, "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "servicecatalog-appregistry": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "servicediscovery", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "servicequotas": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "servicequotas.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "servicequotas.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "sms": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "sms-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "sms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "sns.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, }, }, "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "sqs.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "sqs.us-gov-west-1.amazonaws.com", SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, @@ -10317,126 +30365,318 @@ var awsusgovPartition = partition{ }, }, "ssm": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "ssm.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "ssm.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "states-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "states.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "storagegateway": service{ - - Endpoints: endpoints{ - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "sts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "sts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "support": service{ PartitionEndpoint: "aws-us-gov-global", - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "swf.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "swf.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -10444,90 +30684,330 @@ var awsusgovPartition = partition{ }, }, }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: 
endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + }, }, }, "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "transfer": service{ - - Endpoints: endpoints{ - 
"fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "translate-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + 
}: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{ - Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ - Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -10535,34 +31015,65 @@ var awsusgovPartition = partition{ }, }, "workspaces": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, "xray": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "xray-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "xray-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, }, @@ -10583,210 +31094,419 @@ var awsisoPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "us-iso-east-1": region{ Description: "US ISO East", }, + "us-iso-west-1": region{ + Description: "US ISO WEST", + }, }, Services: services{ "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": 
endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, }, }, "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "apigateway": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + 
defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ Hostname: "dms.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, }, - "us-iso-east-1": endpoint{}, }, }, "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", }, - "us-iso-east-1": endpoint{}, }, }, 
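The remaining hunks in these generated tables all apply the same mechanical change: plain region-string keys such as "us-gov-west-1": endpoint{} become endpointKey{Region, Variant} keys, each FIPS hostname appears both under a fipsVariant key for the real region and under the legacy fips-<region> / <region>-fips pseudo-regions (now flagged Deprecated: boxedTrue), and per-service Defaults move from a single endpoint to an endpointDefaults map keyed by defaultKey{Variant}. The sketch below models that keying scheme only; the type names mirror the SDK's but are redefined locally (with Deprecated simplified to a plain bool), so it compiles on its own and is illustrative rather than the SDK's actual code.

// Self-contained sketch of the keying scheme used by the regenerated tables.
// endpointKey, fipsVariant and serviceEndpoints mirror names from the diff,
// but everything here is redefined locally for illustration.
package main

import "fmt"

type endpointVariant uint64

const (
	fipsVariant      endpointVariant = 1 << (64 - 1 - iota) // FIPS-capable endpoint
	dualStackVariant                                        // dual-stack-capable endpoint
)

type endpointKey struct {
	Region  string
	Variant endpointVariant
}

type endpoint struct {
	Hostname   string
	Deprecated bool
}

type serviceEndpoints map[endpointKey]endpoint

func main() {
	// One service entry, keyed the new way: a plain region key, the same
	// region with the FIPS variant bit set, and the legacy fips-<region>
	// pseudo-region kept for backwards compatibility but marked deprecated.
	sms := serviceEndpoints{
		{Region: "us-gov-west-1"}:                       {},
		{Region: "us-gov-west-1", Variant: fipsVariant}: {Hostname: "sms-fips.us-gov-west-1.amazonaws.com"},
		{Region: "fips-us-gov-west-1"}:                  {Hostname: "sms-fips.us-gov-west-1.amazonaws.com", Deprecated: true},
	}

	// A FIPS-aware lookup selects the variant entry; a plain lookup selects
	// the empty entry, which means "fall back to the partition defaults".
	fmt.Println(sms[endpointKey{Region: "us-gov-west-1", Variant: fipsVariant}].Hostname)
	fmt.Println(sms[endpointKey{Region: "us-gov-west-1"}].Hostname == "")
}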
"elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "es": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "firehose": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "iam": service{ PartitionEndpoint: "aws-iso-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "iam.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -10795,89 +31515,156 @@ var awsisoPartition = partition{ }, }, "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, }, - "us-iso-east-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "medialive": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "mediapackage": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "outposts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "ram": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-iso-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "route53.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -10885,87 +31672,126 @@ var awsisoPartition = partition{ }, }, }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "secretsmanager": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: 
[]string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sqs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ssm": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "support": service{ PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "support.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -10974,37 +31800,65 @@ var awsisoPartition = partition{ }, }, "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "transcribestreaming": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, }, @@ -11025,10 +31879,20 @@ var awsisobPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, 
- SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "us-isob-east-1": region{ @@ -11037,9 +31901,10 @@ var awsisobPartition = partition{ }, Services: services{ "api.ecr": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11047,142 +31912,266 @@ var awsisobPartition = partition{ }, }, }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + 
}: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, }, - "us-isob-east-1": endpoint{}, }, }, "ds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ Protocols: []string{"https"}, }, }, }, "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "es": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "glacier": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "iam": service{ PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, - - 
Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11191,65 +32180,99 @@ var awsisobPartition = partition{ }, }, "kinesis": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, }, - "us-isob-east-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "route53.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11258,71 +32281,92 @@ var awsisobPartition = partition{ }, }, "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: 
[]string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ssm": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "support": service{ PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "support.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11331,15 +32375,31 @@ var awsisobPartition = partition{ }, }, "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "tagging": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 8e8636f..8809861 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -8,6 +8,41 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// A Logger is a minimalistic interface for the SDK to log messages to. +type Logger interface { + Log(...interface{}) +} + +// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution +// behavior. +type DualStackEndpointState uint + +const ( + // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint + // resolution. + DualStackEndpointStateUnset DualStackEndpointState = iota + + // DualStackEndpointStateEnabled enable dual-stack endpoint resolution for endpoints. 
+ DualStackEndpointStateEnabled + + // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. + DualStackEndpointStateDisabled +) + +// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. +type FIPSEndpointState uint + +const ( + // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. + FIPSEndpointStateUnset FIPSEndpointState = iota + + // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. + FIPSEndpointStateEnabled + + // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. + FIPSEndpointStateDisabled +) + // Options provide the configuration needed to direct how the // endpoints will be resolved. type Options struct { @@ -21,8 +56,19 @@ type Options struct { // be returned. This endpoint may not be valid. If StrictMatching is // enabled only services that are known to support dualstack will return // dualstack endpoints. + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. + // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence then this option. UseDualStack bool + // Sets the resolver to resolve a dual-stack endpoint for the service. + UseDualStackEndpoint DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint FIPSEndpointState + // Enables strict matching of services and regions resolved endpoints. // If the partition doesn't enumerate the exact service and region an // error will be returned. This option will prevent returning endpoints @@ -56,6 +102,30 @@ type Options struct { // S3 Regional Endpoint flag helps with resolving the S3 endpoint S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint + + // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority + // over the region name passed to the ResolveEndpoint call. + ResolvedRegion string + + // Logger is the logger that will be used to log messages. + Logger Logger + + // Determines whether logging of deprecated endpoints usage is enabled. + LogDeprecated bool +} + +func (o Options) getEndpointVariant(service string) (v endpointVariant) { + const s3 = "s3" + const s3Control = "s3-control" + + if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) || + ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) { + v |= dualStackVariant + } + if o.UseFIPSEndpoint == FIPSEndpointStateEnabled { + v |= fipsVariant + } + return v } // EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. @@ -196,10 +266,25 @@ func DisableSSLOption(o *Options) { // UseDualStackOption sets the UseDualStack option. Can be used as a functional // option when resolving endpoints. +// +// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint. +// When DualStackEndpointState is set to a non-zero value it takes higher precedence then this option. func UseDualStackOption(o *Options) { o.UseDualStack = true } +// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional +// option when resolving endpoints. 
+func UseDualStackEndpointOption(o *Options) { + o.UseDualStackEndpoint = DualStackEndpointStateEnabled +} + +// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional +// option when resolving endpoints. +func UseFIPSEndpointOption(o *Options) { + o.UseFIPSEndpoint = FIPSEndpointStateEnabled +} + // StrictMatchingOption sets the StrictMatching option. Can be used as a functional // option when resolving endpoints. func StrictMatchingOption(o *Options) { @@ -407,7 +492,7 @@ func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (Resolve func (r Region) Services() map[string]Service { ss := map[string]Service{} for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { + if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { ss[id] = Service{ id: id, p: r.p, @@ -452,9 +537,12 @@ func (s Service) Regions() map[string]Region { } for id := range service.Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, + if id.Variant != 0 { + continue + } + if r, ok := s.p.Regions[id.Region]; ok { + rs[id.Region] = Region{ + id: id.Region, desc: r.Description, p: s.p, } @@ -472,8 +560,11 @@ func (s Service) Regions() map[string]Region { func (s Service) Endpoints() map[string]Endpoint { es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, + if id.Variant != 0 { + continue + } + es[id.Region] = Endpoint{ + id: id.Region, serviceID: s.id, p: s.p, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index c6c6a03..89f6627 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -1,6 +1,7 @@ package endpoints import ( + "encoding/json" "fmt" "regexp" "strconv" @@ -12,6 +13,34 @@ const ( ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" ) +const dnsSuffixTemplateKey = "{dnsSuffix}" + +// defaultKey is a compound map key of a variant and other values. +type defaultKey struct { + Variant endpointVariant + ServiceVariant serviceVariant +} + +// endpointKey is a compound map key of a region and associated variant value. +type endpointKey struct { + Region string + Variant endpointVariant +} + +// endpointVariant is a bit field to describe the endpoints attributes. +type endpointVariant uint64 + +// serviceVariant is a bit field to describe the service endpoint attributes. +type serviceVariant uint64 + +const ( + // fipsVariant indicates that the endpoint is FIPS capable. + fipsVariant endpointVariant = 1 << (64 - 1 - iota) + + // dualStackVariant indicates that the endpoint is DualStack capable. + dualStackVariant +) + var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) type partitions []partition @@ -20,8 +49,12 @@ func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) var opt Options opt.Set(opts...) 
+ if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + if !ps[i].canResolveEndpoint(service, region, opt) { continue } @@ -49,14 +82,76 @@ func (ps partitions) Partitions() []Partition { return parts } +type endpointWithVariants struct { + endpoint + Variants []endpointWithTags `json:"variants"` +} + +type endpointWithTags struct { + endpoint + Tags []string `json:"tags"` +} + +type endpointDefaults map[defaultKey]endpoint + +func (p *endpointDefaults) UnmarshalJSON(data []byte) error { + if *p == nil { + *p = make(endpointDefaults) + } + + var e endpointWithVariants + if err := json.Unmarshal(data, &e); err != nil { + return err + } + + (*p)[defaultKey{Variant: 0}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*p)[defaultKey{Variant: endpointVariant}] = ve + } + + return nil +} + +func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { + if len(tags) == 0 { + unknown = true + return + } + + for _, tag := range tags { + switch { + case strings.EqualFold("fips", tag): + ev |= fipsVariant + case strings.EqualFold("dualstack", tag): + ev |= dualStackVariant + default: + unknown = true + } + } + return ev, unknown +} + type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpointDefaults `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` } func (p partition) Partition() Partition { @@ -67,15 +162,18 @@ func (p partition) Partition() Partition { } } -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { +func (p partition) canResolveEndpoint(service, region string, options Options) bool { s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] + _, hasEndpoint := s.Endpoints[endpointKey{ + Region: region, + Variant: options.getEndpointVariant(service), + }] if hasEndpoint && hasService { return true } - if strictMatch { + if options.StrictMatching { return false } @@ -106,6 +204,10 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( var opt Options opt.Set(opts...) 
+ if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + s, hasService := p.Services[service] if service == Ec2metadataServiceID && !hasService { @@ -123,21 +225,44 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( region = s.PartitionEndpoint } - if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || - (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { - if _, ok := legacyGlobalRegions[service][region]; ok { - region = "aws-global" - } + if r, ok := isLegacyGlobalRegion(service, region, opt); ok { + region = r + } + + variant := opt.getEndpointVariant(service) + + endpoints := s.Endpoints + + serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] + // If we searched for a variant which may have no explicit service defaults, + // then we need to inherit the standard service defaults except the hostname and dnsSuffix + if variant != 0 && !hasServiceDefault { + serviceDefaults = s.Defaults[defaultKey{}] + serviceDefaults.Hostname = "" + serviceDefaults.DNSSuffix = "" } - e, hasEndpoint := s.endpointForRegion(region) - if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] + + var dnsSuffix string + if len(serviceDefaults.DNSSuffix) > 0 { + dnsSuffix = serviceDefaults.DNSSuffix + } else if variant == 0 { + // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for + // a non-variant endpoint then we need to set the dnsSuffix. + dnsSuffix = p.DNSSuffix + } + + noDefaults := !hasServiceDefault && !hasPartitionDefault + + e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) + if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) } - defs := []endpoint{p.Defaults, s.Defaults} + defs := []endpoint{partitionDefaults, serviceDefaults} - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) + return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) } func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { @@ -165,6 +290,31 @@ func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointMod } } +func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { + if opt.getEndpointVariant(service) != 0 { + return "", false + } + + const ( + sts = "sts" + s3 = "s3" + awsGlobal = "aws-global" + ) + + switch { + case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: + return region, false + case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: + return region, false + default: + if _, ok := legacyGlobalRegions[service][region]; ok { + return awsGlobal, true + } + } + + return region, false +} + func serviceList(ss services) []string { list := make([]string, 0, len(ss)) for k := range ss { @@ -172,10 +322,13 @@ func serviceList(ss services) []string { } return list } -func endpointList(es endpoints) []string { +func endpointList(es serviceEndpoints, variant endpointVariant) []string { list := make([]string, 0, len(es)) for k := range es { - list = append(list, k) + if k.Variant != variant { + continue + } + list = append(list, k.Region) } return list } @@ -207,19 +360,19 @@ 
type region struct { type services map[string]service type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpointDefaults `json:"defaults"` + Endpoints serviceEndpoints `json:"endpoints"` } -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if e, ok := s.Endpoints[region]; ok { +func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { + if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { return e, true } if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint } // Unable to find any matching endpoint, return @@ -227,22 +380,73 @@ func (s *service) endpointForRegion(region string) (endpoint, bool) { return endpoint{}, false } -type endpoints map[string]endpoint +type serviceEndpoints map[endpointKey]endpoint + +func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { + if *s == nil { + *s = make(serviceEndpoints) + } + + var regionToEndpoint map[string]endpointWithVariants + + if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { + return err + } + + for region, e := range regionToEndpoint { + (*s)[endpointKey{Region: region}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve + } + } + + return nil +} type endpoint struct { Hostname string `json:"hostname"` Protocols []string `json:"protocols"` CredentialScope credentialScope `json:"credentialScope"` - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` + DNSSuffix string `json:"dnsSuffix"` // Signature Version not used SignatureVersions []string `json:"signatureVersions"` // SSLCommonName not used. SSLCommonName string `json:"sslCommonName"` + + Deprecated boxedBool `json:"deprecated"` +} + +// isZero returns whether the endpoint structure is an empty (zero) value. 
+func (e endpoint) isZero() bool { + switch { + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (credentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + case len(e.SSLCommonName) != 0: + return false + } + return true } const ( @@ -271,7 +475,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { +func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -292,23 +496,26 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ } hostname := e.Hostname - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - region = signingRegion - } if !validateInputRegion(region) { return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") } + if len(merged.DNSSuffix) > 0 { + dnsSuffix = merged.DNSSuffix + } + u := strings.Replace(hostname, "{service}", service, 1) u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) + if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { + opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) + } + return ResolvedEndpoint{ URL: u, PartitionID: partitionID, @@ -346,11 +553,11 @@ func (e *endpoint) mergeIn(other endpoint) { if len(other.SSLCommonName) > 0 { e.SSLCommonName = other.SSLCommonName } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack + if len(other.DNSSuffix) > 0 { + e.DNSSuffix = other.DNSSuffix } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname + if other.Deprecated != boxedBoolUnset { + e.Deprecated = other.Deprecated } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index db6efd6..84922bc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -155,18 +155,71 @@ func serviceSet(ps partitions) map[string]struct{} { return set } +func endpointVariantSetter(variant endpointVariant) (string, error) { + if variant == 0 { + return "0", nil + } + + if variant > (fipsVariant | dualStackVariant) { + return "", fmt.Errorf("unknown endpoint variant") + } + + var symbols []string + if variant&fipsVariant != 0 { + symbols = append(symbols, "fipsVariant") + } + if variant&dualStackVariant != 0 { + symbols = append(symbols, "dualStackVariant") + } + v := strings.Join(symbols, "|") + + return v, nil +} + +func endpointKeySetter(e endpointKey) (string, error) { + var sb strings.Builder + sb.WriteString("endpointKey{\n") + sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + 
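
The resolve change above logs a note when a deprecated endpoint identifier is used, gated on Options.LogDeprecated and a non-nil Logger. A minimal sketch of switching that on from client configuration, assuming the session wiring added later in this diff (which maps the LogDebugWithDeprecated log level onto opt.LogDeprecated); the region is only illustrative.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // LogDebugWithDeprecated (added to aws/logger.go in this change set) asks
        // the endpoint resolver to log "endpoint identifier ... marked as deprecated"
        // whenever a resolved endpoint is flagged as deprecated in the model.
        sess := session.Must(session.NewSession(&aws.Config{
            Region:   aws.String("us-west-2"),
            LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithDeprecated),
        }))

        // Endpoint resolution, and therefore any deprecation logging, happens while
        // the client configuration is being built.
        _ = s3.New(sess)
    }
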
return sb.String(), nil +} + +func defaultKeySetter(e defaultKey) (string, error) { + var sb strings.Builder + sb.WriteString("defaultKey{\n") + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, + "EndpointVariantSetter": endpointVariantSetter, + "EndpointKeySetter": endpointKeySetter, + "DefaultKeySetter": defaultKeySetter, } const v3Tmpl = ` @@ -272,9 +325,9 @@ partition{ {{ StringIfSet "Name: %q,\n" .Name -}} {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} Regions: {{ template "gocode Regions" .Regions }}, Services: {{ template "gocode Services" .Services }}, } @@ -315,19 +368,27 @@ services{ service{ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} {{ if .Endpoints -}} Endpoints: {{ template "gocode Endpoints" .Endpoints }}, {{- end }} } {{- end }} +{{ define "gocode Defaults" -}} +endpointDefaults{ + {{ range $id, $endpoint := . -}} + {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + {{ define "gocode Endpoints" -}} -endpoints{ +serviceEndpoints{ {{ range $id, $endpoint := . 
-}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, {{ end }} } {{- end }} @@ -335,6 +396,7 @@ endpoints{ {{ define "gocode Endpoint" -}} endpoint{ {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} @@ -344,9 +406,7 @@ endpoint{ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} }, {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - + {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} } {{- end }} ` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index 6ed15b2..49674cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -77,6 +77,9 @@ const ( // wire unmarshaled message content of requests and responses made while // using the SDK Will also enable LogDebug. LogDebugWithEventStreamBody + + // LogDebugWithDeprecated states the SDK should log details about deprecated functionality. + LogDebugWithDeprecated ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index e819ab6..9556332 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -330,6 +330,9 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { // WithSetRequestHeaders updates the operation request's HTTP header to contain // the header key value pairs provided. If the header key already exists in the // request's HTTP header set, the existing value(s) will be replaced. +// +// Header keys added will be added as canonical format with title casing +// applied via http.Header.Set method. func WithSetRequestHeaders(h map[string]string) Option { return withRequestHeader(h).SetRequestHeaders } @@ -338,6 +341,6 @@ type withRequestHeader map[string]string func (h withRequestHeader) SetRequestHeaders(r *Request) { for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} + r.HTTPRequest.Header.Set(k, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index fb0a68f..636d9ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "net/http" "net/url" "reflect" @@ -525,6 +526,14 @@ func (r *Request) GetBody() io.ReadSeeker { // Send will not close the request.Request's body. func (r *Request) Send() error { defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } // Regardless of success or failure of the request trigger the Complete // request handlers. 
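
The WithSetRequestHeaders change above now applies headers through http.Header.Set, so keys are canonicalized and existing values are replaced rather than appended. A small sketch of passing it as a per-request option; the bucket name, account ID, and header choice are placeholders.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := s3.New(sess)

        // A key given as "x-amz-expected-bucket-owner" is sent in canonical form
        // ("X-Amz-Expected-Bucket-Owner") and overwrites any existing value.
        _, err := svc.HeadBucketWithContext(aws.BackgroundContext(), &s3.HeadBucketInput{
            Bucket: aws.String("example-bucket"),
        }, request.WithSetRequestHeaders(map[string]string{
            "x-amz-expected-bucket-owner": "111122223333",
        }))
        if err != nil {
            // handle error
        }
    }
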
r.Handlers.Complete.Run(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 752ae47..3f0001f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -15,8 +15,8 @@ import ( // and determine if a request API error should be retried. // // client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to +// determine if the request is retried. type Retryer interface { // RetryRules return the retry delay that should be used by the SDK before // making another request attempt for the failed request. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 3efdac2..1d3f4c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,8 +14,17 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/sts" ) +// CredentialsProviderOptions specifies additional options for configuring +// credentials providers. +type CredentialsProviderOptions struct { + // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, + // such as setting its ExpiryWindow. + WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) +} + func resolveCredentials(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, @@ -40,6 +49,7 @@ func resolveCredentials(cfg *aws.Config, envCfg.WebIdentityTokenFilePath, envCfg.RoleARN, envCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) default: @@ -59,6 +69,7 @@ var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, " func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, filepath string, roleARN, sessionName string, + credOptions *CredentialsProviderOptions, ) (*credentials.Credentials, error) { if len(filepath) == 0 { @@ -69,17 +80,18 @@ func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, return nil, WebIdentityEmptyRoleARNErr } - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) + svc := sts.New(&Session{ + Config: cfg, + Handlers: handlers.Copy(), + }) - return creds, nil + var optFns []func(*stscreds.WebIdentityRoleProvider) + if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil { + optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions) + } + + p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...) 
+ return credentials.NewCredentials(p), nil } func resolveCredsFromProfile(cfg *aws.Config, @@ -114,6 +126,7 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.WebIdentityTokenFile, sharedCfg.RoleARN, sharedCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) case sharedCfg.hasSSOConfiguration(): diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 43b5686..ff3cc01 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -285,5 +285,83 @@ The custom EC2 IMDS endpoint can also be specified via the Session options. sess, err := session.NewSessionWithOptions(session.Options{ EC2MetadataEndpoint: "http://[::1]", }) + +FIPS and DualStack Endpoints + +The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. + +You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), +or programmatically. + +To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable +or disable FIPS endpoint resolution. + + AWS_USE_FIPS_ENDPOINT=true + +To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable +or disable FIPS endpoint resolution. + + [profile myprofile] + region=us-west-2 + use_fips_endpoint=true + +To configure a FIPS endpoint programmatically + + // Option 1: Configure it on a session for all clients + sess, err := session.NewSessionWithOptions(session.Options{ + UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, + }) + if err != nil { + // handle error + } + + client := s3.New(sess) + + // Option 2: Configure it per client + sess, err := session.NewSession() + if err != nil { + // handle error + } + + client := s3.New(sess, &aws.Config{ + UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, + }) + +You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), +or programmatically. + +To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to +enable or disable DualStack endpoint resolution. + + AWS_USE_DUALSTACK_ENDPOINT=true + +To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable +or disable DualStack endpoint resolution. 
+ + [profile myprofile] + region=us-west-2 + use_dualstack_endpoint=true + +To configure a DualStack endpoint programmatically + + // Option 1: Configure it on a session for all clients + sess, err := session.NewSessionWithOptions(session.Options{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) + if err != nil { + // handle error + } + + client := s3.New(sess) + + // Option 2: Configure it per client + sess, err := session.NewSession() + if err != nil { + // handle error + } + + client := s3.New(sess, &aws.Config{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index fffe2f3..d6fa247 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -170,6 +170,18 @@ type envConfig struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint endpoints.FIPSEndpointState } var ( @@ -248,6 +260,12 @@ var ( useClientTLSKey = []string{ "AWS_SDK_GO_CLIENT_TLS_KEY", } + awsUseDualStackEndpoint = []string{ + "AWS_USE_DUALSTACK_ENDPOINT", + } + awsUseFIPSEndpoint = []string{ + "AWS_USE_FIPS_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -376,6 +394,14 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { return envConfig{}, err } + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { + return cfg, err + } + return cfg, nil } @@ -401,3 +427,45 @@ func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []str } return nil } + +func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 4b2e057..4293dbe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "net/http" "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -303,6 +304,11 @@ type Options struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies options for creating credential providers. + // These are only used if the aws.Config does not already + // include credentials. + CredentialsProviderOptions *CredentialsProviderOptions } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -792,6 +798,20 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion } + for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { + if v != endpoints.DualStackEndpointStateUnset { + cfg.UseDualStackEndpoint = v + break + } + } + + for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { + if v != endpoints.FIPSEndpointStateUnset { + cfg.UseFIPSEndpoint = v + break + } + } + return nil } @@ -845,8 +865,10 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) + resolvedRegion := normalizeRegion(s.Config) + region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, s.Config) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) if err != nil { s.Handlers.Validate.PushBack(func(r *request.Request) { if len(r.ClientInfo.Endpoint) != 0 { @@ -867,12 +889,13 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, } } const ec2MetadataServiceID = "ec2metadata" -func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { return endpoints.ResolvedEndpoint{ @@ -884,7 +907,12 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp resolved, err := cfg.EndpointResolver.EndpointFor(service, region, func(opt *endpoints.Options) { opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is // provided in envConfig or sharedConfig with envConfig getting // precedence. @@ -898,6 +926,11 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp // Support the condition where the service is modeled but its // endpoint metadata is not available. opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) }, ) if err != nil { @@ -913,6 +946,8 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) 
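
The Options struct above gains a CredentialsProviderOptions hook, which resolveCredentials consults only when the aws.Config does not already carry credentials and the web-identity path is taken. A minimal sketch, assuming the role ARN and token file arrive through the usual AWS_ROLE_ARN / AWS_WEB_IDENTITY_TOKEN_FILE environment variables:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // The callback receives the WebIdentityRoleProvider built by assumeWebIdentity,
        // letting the caller tune it, for example by widening its ExpiryWindow.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            SharedConfigState: session.SharedConfigEnable,
            CredentialsProviderOptions: &session.CredentialsProviderOptions{
                WebIdentityRoleProviderOptions: func(p *stscreds.WebIdentityRoleProvider) {
                    p.ExpiryWindow = 5 * time.Minute
                },
            },
        }))
        _ = sess
    }
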
+ resolvedRegion := normalizeRegion(s.Config) + var resolved endpoints.ResolvedEndpoint if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) @@ -926,6 +961,7 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, } } @@ -939,3 +975,23 @@ func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aw r.Error = err }) } + +// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided +// config to have the equivalent options for resolution and returns the resolved region name. +func normalizeRegion(cfg *aws.Config) (resolved string) { + const fipsInfix = "-fips-" + const fipsPrefix = "-fips" + const fipsSuffix = "fips-" + + region := aws.StringValue(cfg.Region) + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + resolved = strings.Replace(strings.Replace(strings.Replace( + region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1) + cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } + + return resolved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 6830ece..424c82b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -72,6 +72,12 @@ const ( // EC2 IMDS Endpoint ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // Use FIPS Endpoint Resolution + useFIPSEndpointKey = "use_fips_endpoint" ) // sharedConfig represents the configuration fields of the SDK config files. @@ -161,6 +167,18 @@ type sharedConfig struct { // // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // use_dualstack_endpoint=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // use_fips_endpoint=true + UseFIPSEndpoint endpoints.FIPSEndpointState } type sharedConfigFile struct { @@ -356,6 +374,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + + updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) + + updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -673,3 +695,35 @@ func (e CredentialRequiresARNError) OrigErr() error { func (e CredentialRequiresARNError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// updateEndpointDiscoveryType will only update the dst with the value in the section, if +// a valid key and corresponding EndpointDiscoveryType is found. 
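
normalizeRegion above converts legacy FIPS pseudo-regions into a plain region name and enables FIPS endpoint resolution, with the result recorded as ResolvedRegion on the client config. A small sketch of what that means for a client constructed with one of the old pseudo-region names; the printed endpoint is only what the FIPS variant typically looks like.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // "us-east-1-fips" is a legacy pseudo-region: normalizeRegion strips the
        // fips marker, turns on cfg.UseFIPSEndpoint, and the client then resolves
        // the FIPS variant for plain "us-east-1".
        sess := session.Must(session.NewSession(&aws.Config{
            Region: aws.String("us-east-1-fips"),
        }))

        svc := s3.New(sess)
        fmt.Println(svc.ClientInfo.Endpoint) // e.g. https://s3-fips.us-east-1.amazonaws.com
    }
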
+func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + if section.Bool(key) { + *dst = endpoints.DualStackEndpointStateEnabled + } else { + *dst = endpoints.DualStackEndpointStateDisabled + } + + return +} + +// updateEndpointDiscoveryType will only update the dst with the value in the section, if +// a valid key and corresponding EndpointDiscoveryType is found. +func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + if section.Bool(key) { + *dst = endpoints.FIPSEndpointStateEnabled + } else { + *dst = endpoints.FIPSEndpointStateDisabled + } + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index d465303..4d78162 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -634,21 +634,25 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) } - headerValues := make([]string, len(headers)) + headerItems := make([]string, len(headers)) for i, k := range headers { if k == "host" { if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host + headerItems[i] = "host:" + ctx.Request.Host } else { - headerValues[i] = "host:" + ctx.Request.URL.Host + headerItems[i] = "host:" + ctx.Request.URL.Host } } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") + headerValues := make([]string, len(ctx.SignedHeaderVals[k])) + for i, v := range ctx.SignedHeaderVals[k] { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + + strings.Join(headerValues, ",") } } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") + stripExcessSpaces(headerItems) + ctx.canonicalHeaders = strings.Join(headerItems, "\n") } func (ctx *signingCtx) buildCanonicalString() { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 2a9ad15..9775740 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.40.24" +const SDKVersion = "1.44.65" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 24df543..34a481a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -4,6 +4,7 @@ import ( "fmt" "strconv" "strings" + "unicode" ) var ( @@ -18,7 +19,7 @@ var literalValues = [][]rune{ func isBoolValue(b []rune) bool { for _, lv := range literalValues { - if isLitValue(lv, b) { + if isCaselessLitValue(lv, b) { return true } } @@ -39,6 +40,21 @@ func isLitValue(want, have []rune) bool { return true } +// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. +func isCaselessLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != unicode.ToLower(have[i]) { + return false + } + } + + return true +} + // isNumberValue will return whether not the leading characters in // a byte slice is a number. 
A number is delimited by whitespace or // the newline token. @@ -177,7 +193,7 @@ func newValue(t ValueType, base int, raw []rune) (Value, error) { case QuotedStringType: v.str = string(raw[1 : len(raw)-1]) case BoolType: - v.boolean = runeCompare(v.raw, runesTrue) + v.boolean = isCaselessLitValue(runesTrue, v.raw) } // issue 2253 diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go index 305999d..b5480fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -57,7 +57,7 @@ func getBoolValue(b []rune) (int, error) { continue } - if isLitValue(lv, b) { + if isCaselessLitValue(lv, b) { n = len(lv) } } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go index 2091ba6..ef43d6c 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -1,8 +1,6 @@ package s3shared import ( - "strings" - "github.com/aws/aws-sdk-go/aws" awsarn "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/request" @@ -25,18 +23,6 @@ func (r ResourceRequest) AllowCrossRegion() bool { return aws.BoolValue(r.Request.Config.S3UseARNRegion) } -// UseFIPS returns true if request config region is FIPS -func (r ResourceRequest) UseFIPS() bool { - return IsFIPS(aws.StringValue(r.Request.Config.Region)) -} - -// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS -// -// Deprecated: FIPS pseudo-regions will not be in the ARN -func (r ResourceRequest) ResourceConfiguredForFIPS() bool { - return IsFIPS(r.ARN().Region) -} - // IsCrossPartition returns true if client is configured for another partition, than // the partition that resource ARN region resolves to. func (r ResourceRequest) IsCrossPartition() bool { @@ -53,11 +39,6 @@ func (r ResourceRequest) HasCustomEndpoint() bool { return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 } -// IsFIPS returns true if region is a fips region -func IsFIPS(clientRegion string) bool { - return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") -} - // IsCrossRegion returns true if request signing region is not same as configured region func IsCrossRegion(req *request.Request, otherRegion string) bool { return req.ClientInfo.SigningRegion != otherRegion diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go new file mode 100644 index 0000000..4bf2b27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes is a no-op for Go 1.18 and above. 
+func ApplyHTTPTransportFixes(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go new file mode 100644 index 0000000..2ee2c36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go @@ -0,0 +1,19 @@ +//go:build !go1.18 +// +build !go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event +// stream functionality. Go 1.15 through 1.17 HTTP client could hang forever +// when an HTTP/2 connection failed with an non-200 status code and err. Using +// Expect 100-Continue, allows the HTTP client to gracefully handle the non-200 +// status code, and close the connection. +// +// This is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *request.Request) { + r.Handlers.Sign.PushBack(func(r *request.Request) { + r.HTTPRequest.Header.Set("Expect", "100-Continue") + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 864fb67..2aec806 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -82,13 +82,17 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) field, _ := value.Type().FieldByName(payload) tag = field.Tag value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { + if !value.IsValid() && tag.Get("type") != "structure" { return nil } } buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } t := value.Type() first := true @@ -144,8 +148,6 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) } - buf.WriteString("}") - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go index a029217..d9aa271 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -49,9 +49,8 @@ func Build(req *request.Request) { buf = emptyJSON } - if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { - req.SetBufferBody(buf) - } + // Always serialize the body, don't suppress it. + req.SetBufferBody(buf) if req.ClientInfo.TargetPrefix != "" { target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index fb35fee..63f66af 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -272,7 +272,29 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) switch value := v.Interface().(type) { case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) case []byte: str = base64.StdEncoding.EncodeToString(value) case bool: @@ -306,5 +328,6 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } + return str, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go index 4366de2..b54c99e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -28,18 +28,27 @@ func PayloadMember(i interface{}) interface{} { return nil } -// PayloadType returns the type of a payload field member of i if there is one, or "". +const nopayloadPayloadType = "nopayload" + +// PayloadType returns the type of a payload field member of i if there is one, +// or "". 
func PayloadType(i interface{}) string { v := reflect.Indirect(reflect.ValueOf(i)) if !v.IsValid() { return "" } + if field, ok := v.Type().FieldByName("_"); ok { + if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { + return nopayloadPayloadType + } + if payloadName := field.Tag.Get("payload"); payloadName != "" { if member, ok := v.Type().FieldByName(payloadName); ok { return member.Tag.Get("type") } } } + return "" } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 92f8b4d..cdef403 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -140,7 +140,7 @@ func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHe prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } @@ -204,6 +204,13 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro switch v.Interface().(type) { case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return fmt.Errorf("failed to decode JSONValue, %v", err) + } + header = string(b) + } v.Set(reflect.ValueOf(&header)) case []byte: b, err := base64.StdEncoding.DecodeString(header) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index d486a4c..d9a4e76 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -29,7 +29,8 @@ const ( RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999" // This format is used for output time with fractional second precision up to milliseconds ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" @@ -82,6 +83,7 @@ func ParseTime(formatName, value string) (time.Time, error) { case ISO8601TimeFormatName: // Smithy DateTime format return tryParse(value, ISO8601TimeFormat, + iso8601TimeFormatNoZ, time.RFC3339Nano, time.RFC3339, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 3315330..13a367d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -196,6 +196,10 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // to retry the failed requests. For more information, see Amazon S3 Error Best // Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). // +// You cannot use Content-Type: application/x-www-form-urlencoded with Complete +// Multipart Upload requests. Also, if you do not provide a Content-Type header, +// CompleteMultipartUpload returns a 200 OK response. 
+// // For more information about multipart uploads, see Uploading Objects Using // Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // @@ -309,8 +313,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // You can store individual objects of up to 5 TB in Amazon S3. You create a // copy of your object up to 5 GB in size in a single atomic action using this // API. However, to copy an object greater than 5 GB, you must use the multipart -// upload Upload Part - Copy API. For more information, see Copy Object Using -// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// upload Upload Part - Copy (UploadPartCopy) API. For more information, see +// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -359,7 +363,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition // keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). // -// x-amz-copy-source-if Headers +// x-amz-copy-source-if Headers // // To only copy an object under certain conditions, such as whether the Etag // matches or whether the object was modified before or after a specified date, @@ -416,6 +420,28 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // +// If the bucket that you're copying objects to uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify +// an ACL or PUT requests that specify bucket owner full control ACLs, such +// as the bucket-owner-full-control canned ACL or an equivalent form of this +// ACL expressed in the XML format. +// +// For more information, see Controlling ownership of objects and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// If your bucket uses the bucket owner enforced setting for Object Ownership, +// all objects written to the bucket by any account will be owned by the bucket +// owner. +// +// Checksums +// +// When copying an object, if it has a checksum, that checksum will be copied +// to the new object by default. When you copy the object over, you may optionally +// specify a different checksum algorithm to use with the x-amz-checksum-algorithm +// header. +// // Storage Class Options // // You can use the CopyObject action to change the storage class of an object @@ -554,8 +580,18 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // your application must be able to handle 307 redirect. For more information, // see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). 
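
The Checksums note added to the CopyObject documentation above refers to the x-amz-checksum-algorithm header; in this SDK version that header is surfaced as the ChecksumAlgorithm field on CopyObjectInput. A hedged sketch only, with placeholder bucket and key names:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        // Copy an object and ask S3 to compute a SHA-256 checksum for the copy,
        // per the x-amz-checksum-algorithm header described above.
        _, err := svc.CopyObject(&s3.CopyObjectInput{
            Bucket:            aws.String("destination-bucket"),
            Key:               aws.String("destination-key"),
            CopySource:        aws.String("source-bucket/source-key"),
            ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
        })
        if err != nil {
            // handle error
        }
    }
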
// -// When creating a bucket using this operation, you can optionally specify the -// accounts or groups that should be granted specific permissions on the bucket. +// Access control lists (ACLs) +// +// When creating a bucket using this operation, you can optionally configure +// the bucket ACL to specify the accounts or groups that should be granted specific +// permissions on the bucket. +// +// If your CreateBucket request sets bucket owner enforced for S3 Object Ownership +// and specifies a bucket ACL that provides access to an external Amazon Web +// Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership +// error code. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// // There are two ways to grant the appropriate permissions using the request // headers. // @@ -568,11 +604,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. These headers map to the set of permissions Amazon S3 supports // in an ACL. For more information, see Access control list (ACL) overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You -// specify each grantee as a type=value pair, where the type is one of the -// following: id – if the value specified is the canonical user ID of an -// Amazon Web Services account uri – if you are granting permissions to -// a predefined group emailAddress – if the value specified is the email +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an Amazon Web Services account uri – if you are granting permissions +// to a predefined group emailAddress – if the value specified is the email // address of an Amazon Web Services account Using email addresses to specify // a grantee is only supported in the following Amazon Web Services Regions: // US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific @@ -589,15 +625,23 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // // Permissions // -// If your CreateBucket request specifies ACL permissions and the ACL is public-read, -// public-read-write, authenticated-read, or if you specify access permissions -// explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl -// permissions are needed. If the ACL the CreateBucket request is private, only -// s3:CreateBucket permission is needed. +// In addition to s3:CreateBucket, the following permissions are required when +// your CreateBucket includes specific headers: +// +// * ACLs - If your CreateBucket request specifies ACL permissions and the +// ACL is public-read, public-read-write, authenticated-read, or if you specify +// access permissions explicitly through any other ACL, both s3:CreateBucket +// and s3:PutBucketAcl permissions are needed. If the ACL the CreateBucket +// request is private or doesn't specify any ACLs, only s3:CreateBucket permission +// is needed. +// +// * Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket +// request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning +// permissions are required. 
// -// If ObjectLockEnabledForBucket is set to true in your CreateBucket request, -// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions -// are required. +// * S3 Object Ownership - If your CreateBucket request includes the the +// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission +// is required. // // The following operations are related to CreateBucket: // @@ -726,16 +770,15 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // You can optionally request server-side encryption. For server-side encryption, // Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. You can provide your own encryption key, -// or use Amazon Web Services Key Management Service (Amazon Web Services KMS) -// customer master keys (CMKs) or Amazon S3-managed encryption keys. If you -// choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. +// If you choose to provide your own encryption key, the request headers you +// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. // // To perform a multipart upload with encryption using an Amazon Web Services -// KMS CMK, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* // actions on the key. These permissions are required because Amazon S3 must // decrypt and read data from the encrypted file parts before it completes the // multipart upload. For more information, see Multipart upload API and permissions @@ -743,10 +786,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // in the Amazon S3 User Guide. // // If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the Amazon Web Services KMS CMK, then you -// must have these permissions on the key policy. If your IAM user or role belongs -// to a different account than the key, then you must have the permissions on -// both the key policy and your IAM user or role. +// Amazon Web Services account as the KMS key, then you must have these permissions +// on the key policy. If your IAM user or role belongs to a different account +// than the key, then you must have the permissions on both the key policy and +// your IAM user or role. // // For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // @@ -776,27 +819,25 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // use Amazon Web Services managed encryption keys or provide your own encryption // key. 
// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in Amazon Web Services Key Management Service (Amazon Web Services -// KMS) – If you want Amazon Web Services to manage the keys used to encrypt +// * Use encryption keys managed by Amazon S3 or customer managed key stored +// in Amazon Web Services Key Management Service (Amazon Web Services KMS) +// – If you want Amazon Web Services to manage the keys used to encrypt // data, specify the following headers in the request. x-amz-server-side-encryption // x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context // If you specify x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon -// Web Services managed CMK in Amazon Web Services KMS to protect the data. +// Web Services managed key in Amazon Web Services KMS to protect the data. // All GET and PUT requests for an object protected by Amazon Web Services // KMS fail if you don't make them with SSL or by using SigV4. For more information -// about server-side encryption with CMKs stored in Amazon Web Services KMS -// (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs -// stored in Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// about server-side encryption with KMS key (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // * Use customer-provided encryption keys – If you want to manage your // own encryption keys, provide all the following headers in the request. // x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key // x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon -// Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // Access-Control-List (ACL)-Specific Request Headers // @@ -1279,20 +1320,20 @@ func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBuc // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. 
+// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates // in the S3 Intelligent-Tiering storage class. // -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to DeleteBucketIntelligentTieringConfiguration include: // @@ -2616,6 +2657,12 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // is granted to the anonymous user, you can return the ACL of the bucket without // using an authorization header. // +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// // Related Resources // // * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) @@ -2786,13 +2833,14 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the cors configuration information set for the bucket. +// Returns the Cross-Origin Resource Sharing (CORS) configuration information +// set for the bucket. // // To use this operation, you must have permission to perform the s3:GetBucketCORS // action. By default, the bucket owner has this permission and can grant it // to others. // -// For more information about cors, see Enabling Cross-Origin Resource Sharing +// For more information about CORS, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). // // The following operations are related to GetBucketCors: @@ -2968,20 +3016,20 @@ func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketInt // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates +// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates // in the S3 Intelligent-Tiering storage class. // -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to GetBucketIntelligentTieringConfiguration include: // @@ -3806,10 +3854,10 @@ func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControls // // Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, // you must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). // // For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). // // The following operations are related to GetBucketOwnershipControls: // @@ -4258,7 +4306,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // GetBucketTagging has the following special error: // -// * Error code: NoSuchTagSetError Description: There is no tag set associated +// * Error code: NoSuchTagSet Description: There is no tag set associated // with the bucket. // // The following operations are related to GetBucketTagging: @@ -4533,8 +4581,6 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // more information about request types, see HTTP Host Header Bucket Specification // (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). // -// To distribute large files to many people, you can save bandwidth costs by -// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). 
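As an editorial aside on the GetObject documentation updated above: the vendored v1 client is typically called as in the following minimal sketch. The session wiring uses default credential/region resolution, and the bucket and key names are placeholders, not values taken from this change.

package main

import (
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// "example-bucket" and "example-key" are placeholders.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err) // needs the relevant read permission described in the doc comment above
	}
	defer out.Body.Close()

	// Stream the object body to stdout.
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
}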
// @@ -4547,9 +4593,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Encryption request headers, like x-amz-server-side-encryption, should not // be sent for GET requests if your object uses server-side encryption with -// CMKs stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption -// with Amazon S3–managed encryption keys (SSE-S3). If your object does use -// these types of keys, you’ll get an HTTP 400 BadRequest error. +// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption +// keys (SSE-S3). If your object does use these types of keys, you’ll get +// an HTTP 400 BadRequest error. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -4564,16 +4610,15 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). // -// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging -// action), the response also returns the x-amz-tagging-count header that provides -// the count of number of tags associated with the object. You can use GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// Assuming you have the relevant permission to read object tags, the response +// also returns the x-amz-tagging-count header that provides the count of number +// of tags associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // to retrieve the tag set associated with an object. // // Permissions // -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// You need the relevant read object (or version) permission for this operation. +// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // If the object you request does not exist, the error Amazon S3 returns depends // on whether you also have the s3:ListBucket permission. // @@ -4588,8 +4633,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // By default, the GET action returns the current version of an object. To return // a different version, use the versionId subresource. // -// * You need the s3:GetObjectVersion permission to access a specific version -// of an object. +// * If you supply a versionId, you need the s3:GetObjectVersion permission +// to access a specific version of an object. If you request a specific version, +// you do not need to have the s3:GetObject permission. // // * If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in @@ -4727,7 +4773,10 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. // // Returns the access control list (ACL) of an object. To use this operation, -// you must have READ_ACP access to the object. +// you must have s3:GetObjectAcl permissions or READ_ACP access to the object. 
+// For more information, see Mapping of ACL permissions and access policy permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide // // This action is not supported by Amazon S3 on Outposts. // @@ -4736,10 +4785,18 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // By default, GET returns ACL information about the current version of an object. // To return ACL information about a different version, use the versionId subresource. // +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// // The following operations are related to GetObjectAcl: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) @@ -4777,6 +4834,166 @@ func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, return out, req.Send() } +const opGetObjectAttributes = "GetObjectAttributes" + +// GetObjectAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAttributes for more information on using the GetObjectAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAttributesRequest method. +// req, resp := client.GetObjectAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *request.Request, output *GetObjectAttributesOutput) { + op := &request.Operation{ + Name: opGetObjectAttributes, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?attributes", + } + + if input == nil { + input = &GetObjectAttributesInput{} + } + + output = &GetObjectAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAttributes API operation for Amazon Simple Storage Service. +// +// Retrieves all the metadata from an object without returning the object itself. +// This action is useful if you're interested only in an object's metadata. +// To use GetObjectAttributes, you must have READ access to the object. 
+// +// GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, +// GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, +// and ListParts. All of the data returned with each of those individual calls +// can be returned with a single call to GetObjectAttributes. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// * Encryption request headers, such as x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management +// Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption +// keys (SSE-S3). If your object does use these types of keys, you'll get +// an HTTP 400 Bad Request error. +// +// * The last modified property in this case is the creation date of the +// object. +// +// Consider the following when using request headers: +// +// * If both of the If-Match and If-Unmodified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since +// condition evaluates to false. +// +// * If both of the If-None-Match and If-Modified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since +// condition evaluates to true. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// The permissions that you need to use this operation depend on whether the +// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion +// and s3:GetObjectVersionAttributes permissions for this operation. If the +// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes +// permissions. For more information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 Not Found ("no such key") error. +// +// * If you don't have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden ("access denied") error. 
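Before the related-actions list that follows, a hedged sketch of how the newly vendored GetObjectAttributes operation can be called from Go. The bucket and key are caller-supplied placeholders, and the attribute names are the string forms of the ObjectAttributes enum; this is illustrative only, not part of the upstream change.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeObject fetches selected metadata without downloading the object body.
func describeObject(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ObjectAttributes: []*string{
			aws.String("ETag"),
			aws.String("ObjectSize"),
			aws.String("StorageClass"),
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("etag=%s size=%d class=%s\n",
		aws.StringValue(out.ETag),
		aws.Int64Value(out.ObjectSize),
		aws.StringValue(out.StorageClass))
	return nil
}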
+// +// The following actions are related to GetObjectAttributes: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// * GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// * GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// * GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributes(input *GetObjectAttributesInput) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + return out, req.Send() +} + +// GetObjectAttributesWithContext is the same as GetObjectAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAttributesWithContext(ctx aws.Context, input *GetObjectAttributesInput, opts ...request.Option) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetObjectLegalHold = "GetObjectLegalHold" // GetObjectLegalHoldRequest generates a "aws/request.Request" representing the @@ -4821,11 +5038,15 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Gets an object's current Legal Hold status. For more information, see Locking +// Gets an object's current legal hold status. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. // +// The following action is related to GetObjectLegalHold: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4903,6 +5124,10 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // placed in the specified bucket. 
For more information, see Locking Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // +// The following action is related to GetObjectLockConfiguration: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4980,6 +5205,10 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // // This action is not supported by Amazon S3 on Outposts. // +// The following action is related to GetObjectRetention: +// +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5068,12 +5297,14 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// The following action is related to GetObjectTagging: -// -// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// The following actions are related to GetObjectTagging: // // * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) // +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5456,9 +5687,9 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // * Encryption request headers, like x-amz-server-side-encryption, should // not be sent for GET requests if your object uses server-side encryption -// with CMKs stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption -// with Amazon S3–managed encryption keys (SSE-S3). If your object does -// use these types of keys, you’ll get an HTTP 400 BadRequest error. +// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, +// you’ll get an HTTP 400 BadRequest error. // // * The last modified property in this case is the creation date of the // object. @@ -5482,8 +5713,8 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // Permissions // -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// You need the relevant read object (or version) permission for this operation. +// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // If the object you request does not exist, the error Amazon S3 returns depends // on whether you also have the s3:ListBucket permission. 
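Since HeadObject returns only headers, it is the cheapest way to exercise the read-permission behaviour described above (404-style errors with s3:ListBucket, 403 without). A minimal sketch with placeholder names and default session wiring:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// "example-bucket" and "example-key" are placeholders.
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("size=%d bytes, last modified %s\n",
		aws.Int64Value(out.ContentLength),
		aws.TimeValue(out.LastModified))
}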
// @@ -5493,10 +5724,12 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // * If you don’t have the s3:ListBucket permission, Amazon S3 returns // an HTTP status code 403 ("access denied") error. // -// The following action is related to HeadObject: +// The following actions are related to HeadObject: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses // for more information on returned errors. // @@ -5676,20 +5909,20 @@ func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucket // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates // in the S3 Intelligent-Tiering storage class. // -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to ListBucketIntelligentTieringConfigurations include: // @@ -5973,6 +6206,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // ListBuckets API operation for Amazon Simple Storage Service. // // Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6707,6 +6941,9 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // requests you can include the part-number-marker query string parameter and // set its value to the NextPartNumberMarker field value from the previous response. // +// If the upload was created using a checksum algorithm, you will need to have +// permission to the kms:Decrypt action for the request to succeed. +// // For more information on multipart uploads, see Uploading Objects Using Multipart // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // @@ -6723,6 +6960,8 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // +// * GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// // * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6981,6 +7220,14 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // existing application that updates a bucket ACL using the request body, then // you can continue to use that approach. // +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// // Access Permissions // // You can set access permissions using one of the following methods: @@ -7372,10 +7619,12 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon -// S3-managed keys (SSE-S3) or Amazon Web Services KMS customer master keys -// (SSE-KMS). If you specify default encryption using SSE-KMS, you can also -// configure Amazon S3 Bucket Key. For information about default encryption, -// see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify +// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket +// Key. When the default encryption is SSE-KMS, if you upload an object to the +// bucket and do not specify the KMS key to use for encryption, Amazon S3 uses +// the default Amazon Web Services managed KMS key for your account. For information +// about default encryption, see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) // in the Amazon S3 User Guide. 
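To illustrate the SSE-KMS default-encryption behaviour described above, a minimal sketch against the vendored client follows; the bucket name and KMS key alias are assumptions for the example, not values from this change.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultKMSEncryption configures SSE-KMS default encryption for the given
// bucket. The KMS key ID/alias is a placeholder supplied by the caller.
func setDefaultKMSEncryption(svc *s3.S3, bucket, kmsKeyID string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
						SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
						KMSMasterKeyID: aws.String(kmsKeyID),
					},
					// Optionally reduce KMS request traffic with an S3 Bucket Key.
					BucketKeyEnabled: aws.Bool(true),
				},
			},
		},
	})
	return err
}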
@@ -7474,20 +7723,20 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates // in the S3 Intelligent-Tiering storage class. // -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to PutBucketIntelligentTieringConfiguration include: // @@ -7849,8 +8098,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // // Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. For information about lifecycle configuration, see -// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information +// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). // // Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. @@ -7862,8 +8113,9 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // Rules // // You specify the lifecycle configuration in your request body. The lifecycle -// configuration is specified as XML consisting of one or more rules. 
Each rule -// consists of the following: +// configuration is specified as XML consisting of one or more rules. An Amazon +// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: // // * Filter identifying a subset of objects to which the rule applies. The // filter can be based on a key name prefix, object tags, or a combination @@ -7998,6 +8250,12 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // the Grantee request element to grant access to other people. The Permissions // request element specifies the kind of access the grantee has to the logs. // +// If the target bucket for log delivery uses the bucket owner enforced setting +// for S3 Object Ownership, you can't use the Grantee request element to grant +// access to others. Permissions can only be granted using policies. For more +// information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +// // Grantee Values // // You can specify the person (grantee) to whom you're assigning access rights @@ -8021,7 +8279,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // // For more information about server access logging, see Server Access Logging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in +// the Amazon S3 User Guide. // // For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). // For more information about returning the logging status of a bucket, see @@ -8129,7 +8388,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // // * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // @@ -8330,6 +8589,10 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // You can disable notifications by adding the empty NotificationConfiguration // element. // +// For more information about the number of event notification configurations +// that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) +// in Amazon Web Services General Reference. +// // By default, only the bucket owner can configure notifications on a bucket. // However, bucket owners can use a bucket policy to grant permission to other // users to set this configuration with s3:PutBucketNotification permission. @@ -8430,11 +8693,11 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this // operation, you must have the s3:PutBucketOwnershipControls permission. 
For -// more information about Amazon S3 permissions, see Specifying Permissions -// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// more information about Amazon S3 permissions, see Specifying permissions +// in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). // -// For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). // // The following operations are related to PutBucketOwnershipControls: // @@ -8649,11 +8912,11 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // Handling Replication of Encrypted Objects // // By default, Amazon S3 doesn't replicate objects that are stored at rest using -// server-side encryption with CMKs stored in Amazon Web Services KMS. To replicate -// Amazon Web Services KMS-encrypted objects, add the following: SourceSelectionCriteria, -// SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. -// For information about replication configuration, see Replicating Objects -// Created with SSE Using CMKs stored in Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). // // For information on PutBucketReplication errors, see List of replication-related // error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) @@ -8967,8 +9230,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // PutBucketVersioning API operation for Amazon Simple Storage Service. // -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. +// Sets the versioning state of an existing bucket. // // You can set the versioning state with one of the following values: // @@ -8982,10 +9244,10 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) // request does not return a versioning state value. // -// If the bucket owner enables MFA Delete in the bucket versioning configuration, -// the bucket owner must include the x-amz-mfa request header and the Status -// and the MfaDelete request elements in a request to set the versioning state -// of the bucket. +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning configuration, +// you must include the x-amz-mfa request header and the Status and the MfaDelete +// request elements in a request to set the versioning state of the bucket. 
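A small usage note on the versioning text above: enabling versioning without MFA Delete needs only the Status element. The sketch below uses a placeholder bucket name; the MFA-related fields are shown only as comments because, per the documentation above, setting MfaDelete additionally requires the bucket owner to send the x-amz-mfa header.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableVersioning sets the bucket's versioning state to Enabled.
// Requires s3:PutBucketVersioning on the placeholder bucket.
func enableVersioning(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		// MFA: aws.String("device-arn mfa-code"), // only when also setting MfaDelete
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
			// MFADelete: aws.String(s3.MFADeleteEnabled),
		},
	})
	return err
}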
// // If you have an object expiration lifecycle policy in your non-versioned bucket // and you want to maintain the same permanent delete behavior when you enable @@ -9272,6 +9534,23 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // +// If the bucket that you're uploading objects to uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify +// an ACL or PUT requests that specify bucket owner full control ACLs, such +// as the bucket-owner-full-control canned ACL or an equivalent form of this +// ACL expressed in the XML format. PUT requests that contain other ACLs (for +// example, custom grants to certain Amazon Web Services accounts) fail and +// return a 400 error with the error code AccessControlListNotSupported. +// +// For more information, see Controlling ownership of objects and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// If your bucket uses the bucket owner enforced setting for Object Ownership, +// all objects written to the bucket by any account will be owned by the bucket +// owner. +// // Storage Class Options // // By default, Amazon S3 uses the STANDARD Storage Class to store newly created @@ -9391,6 +9670,14 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // in the Amazon S3 User Guide. // +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// // Access Permissions // // You can set access permissions using one of the following methods: @@ -9544,7 +9831,7 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Applies a Legal Hold configuration to the specified object. For more information, +// Applies a legal hold configuration to the specified object. For more information, // see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. @@ -9722,12 +10009,6 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // // This action is not supported by Amazon S3 on Outposts. // -// Permissions -// -// When the Object Lock retention mode is set to compliance, you need s3:PutObjectRetention -// and s3:BypassGovernanceRetention permissions. For other requests to PutObjectRetention, -// only s3:PutObjectRetention permissions are required. -// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10305,6 +10586,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) // in the Amazon S3 User Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference @@ -10340,11 +10622,10 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. For objects that are encrypted with Amazon -// S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored -// in Amazon Web Services Key Management Service (SSE-KMS), server-side encryption -// is handled transparently, so you don't need to specify anything. For more -// information about server-side encryption, including SSE-S3 and SSE-KMS, -// see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), +// server-side encryption is handled transparently, so you don't need to +// specify anything. For more information about server-side encryption, including +// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide. // // Working with the Response Body @@ -10449,7 +10730,7 @@ type SelectObjectContentEventStream struct { // (e.g. http.Response.Body), that will be closed when the stream Close method // is called. // -// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream{ +// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream){ // es.Reader = myMockStreamReader // es.StreamCloser = myMockStreamCloser // }) @@ -10638,9 +10919,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // Part numbers can be any number from 1 to 10,000, inclusive. A part number // uniquely identifies a part and also defines its position within the object // being created. If you upload a new part using the same part number that was -// used with a previous part, the previously uploaded part is overwritten. Each -// part must be at least 5 MB in size, except the last part. There is no size -// limit on the last part of your multipart upload. +// used with a previous part, the previously uploaded part is overwritten. +// +// For information about maximum and minimum part sizes and other multipart +// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. // // To ensure that data is not corrupted when traversing the network, specify // the Content-MD5 header in the upload part request. 
Amazon S3 checks the part @@ -10788,8 +11071,8 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // your request and a byte range by adding the request header x-amz-copy-source-range // in your request. // -// The minimum allowable part size for a multipart upload is 5 MB. For more -// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) +// For information about maximum and minimum part sizes and other multipart +// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) // in the Amazon S3 User Guide. // // Instead of using an existing object as part data, you might use the UploadPart @@ -10811,7 +11094,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // in the Amazon S3 User Guide. // // * For information about copying objects using a single atomic action vs. -// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) // in the Amazon S3 User Guide. // // * For information about using server-side encryption with customer-provided @@ -10952,8 +11235,8 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // WriteGetObjectResponse API operation for Amazon Simple Storage Service. // // Passes transformed objects to a GetObject operation when using Object Lambda -// Access Points. For information about Object Lambda Access Points, see Transforming -// objects with Object Lambda Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// access points. For information about Object Lambda access points, see Transforming +// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) // in the Amazon S3 User Guide. // // This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), @@ -10973,7 +11256,7 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // (PII) and decompress S3 objects. These Lambda functions are available in // the Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda Access Point. +// Lambda access point. // // Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, // a natural language processing (NLP) service using machine learning to find @@ -11036,12 +11319,20 @@ type AbortIncompleteMultipartUpload struct { DaysAfterInitiation *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortIncompleteMultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortIncompleteMultipartUpload) GoString() string { return s.String() } @@ -11067,17 +11358,17 @@ type AbortMultipartUploadInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Key of the object for which the multipart upload was initiated. @@ -11087,8 +11378,8 @@ type AbortMultipartUploadInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -11098,12 +11389,20 @@ type AbortMultipartUploadInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s AbortMultipartUploadInput) GoString() string { return s.String() } @@ -11205,12 +11504,20 @@ type AbortMultipartUploadOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadOutput) GoString() string { return s.String() } @@ -11231,12 +11538,20 @@ type AccelerateConfiguration struct { Status *string `type:"string" enum:"BucketAccelerateStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccelerateConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccelerateConfiguration) GoString() string { return s.String() } @@ -11258,12 +11573,20 @@ type AccessControlPolicy struct { Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlPolicy) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlPolicy) GoString() string { return s.String() } @@ -11312,12 +11635,20 @@ type AccessControlTranslation struct { Owner *string `type:"string" required:"true" enum:"OwnerOverride"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlTranslation) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s AccessControlTranslation) GoString() string { return s.String() } @@ -11355,12 +11686,20 @@ type AnalyticsAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsAndOperator) GoString() string { return s.String() } @@ -11419,12 +11758,20 @@ type AnalyticsConfiguration struct { StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsConfiguration) GoString() string { return s.String() } @@ -11483,12 +11830,20 @@ type AnalyticsExportDestination struct { S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsExportDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsExportDestination) GoString() string { return s.String() } @@ -11534,12 +11889,20 @@ type AnalyticsFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s AnalyticsFilter) GoString() string { return s.String() } @@ -11607,12 +11970,20 @@ type AnalyticsS3BucketDestination struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsS3BucketDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsS3BucketDestination) GoString() string { return s.String() } @@ -11678,12 +12049,20 @@ type Bucket struct { Name *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Bucket) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Bucket) GoString() string { return s.String() } @@ -11712,12 +12091,20 @@ type BucketLifecycleConfiguration struct { Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLifecycleConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLifecycleConfiguration) GoString() string { return s.String() } @@ -11762,12 +12149,20 @@ type BucketLoggingStatus struct { LoggingEnabled *LoggingEnabled `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLoggingStatus) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s BucketLoggingStatus) GoString() string { return s.String() } @@ -11807,12 +12202,20 @@ type CORSConfiguration struct { CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSConfiguration) GoString() string { return s.String() } @@ -11880,12 +12283,20 @@ type CORSRule struct { MaxAgeSeconds *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSRule) GoString() string { return s.String() } @@ -11994,12 +12405,20 @@ type CSVInput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVInput) GoString() string { return s.String() } @@ -12076,12 +12495,20 @@ type CSVOutput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CSVOutput) GoString() string { return s.String() } @@ -12116,6 +12543,85 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { return s } +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Checksum) SetChecksumCRC32(v string) *Checksum { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Checksum) SetChecksumCRC32C(v string) *Checksum { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Checksum) SetChecksumSHA1(v string) *Checksum { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. 
+func (s *Checksum) SetChecksumSHA256(v string) *Checksum { + s.ChecksumSHA256 = &v + return s +} + // Container for specifying the Lambda notification configuration. type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -12130,7 +12636,7 @@ type CloudFunctionConfiguration struct { Event *string `deprecated:"true" type:"string" enum:"Event"` // Bucket events for which to send notifications. - Events []*string `locationName:"Event" type:"list" flattened:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. @@ -12140,12 +12646,20 @@ type CloudFunctionConfiguration struct { InvocationRole *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CloudFunctionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CloudFunctionConfiguration) GoString() string { return s.String() } @@ -12192,12 +12706,20 @@ type CommonPrefix struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CommonPrefix) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CommonPrefix) GoString() string { return s.String() } @@ -12223,17 +12745,45 @@ type CompleteMultipartUploadInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. 
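The generated Set* helpers on the new Checksum type return the receiver, so a value can be populated fluently. A minimal sketch of that pattern, using placeholder checksum strings:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Each setter stores the value and returns *Checksum, so calls chain.
	sum := (&s3.Checksum{}).
		SetChecksumCRC32("placeholder-crc32").
		SetChecksumSHA256("placeholder-sha256")

	// String() pretty-prints the struct; unset members are omitted.
	fmt.Println(sum)
}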
This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -12246,23 +12796,53 @@ type CompleteMultipartUploadInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // ID for the initiated multipart upload. // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadInput) GoString() string { return s.String() } @@ -12305,6 +12885,30 @@ func (s *CompleteMultipartUploadInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32C(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA1(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA256(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA256 = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { s.ExpectedBucketOwner = &v @@ -12329,6 +12933,31 @@ func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultip return s } +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
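Because SSECustomerKey carries the sensitive:"true" tag, the String and GoString output replaces its value as the doc comments above describe. A minimal sketch of that behavior; the bucket, key, upload ID, and key material are placeholders and no request is sent:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.CompleteMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-key"),
		UploadId:             aws.String("example-upload-id"),
		SSECustomerAlgorithm: aws.String("AES256"),
		// A real SSE-C key is 32 bytes of key material.
		SSECustomerKey: aws.String("placeholder-key-material"),
	}

	// The SSECustomerKey member name appears, but its value is redacted.
	fmt.Println(in.String())
}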
+func (s *CompleteMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CompleteMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKey(v string) *CompleteMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CompleteMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CompleteMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + // SetUploadId sets the UploadId field's value. func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { s.UploadId = &v @@ -12378,9 +13007,9 @@ type CompleteMultipartUploadOutput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `type:"string"` @@ -12388,16 +13017,50 @@ type CompleteMultipartUploadOutput struct { // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + // Entity tag that identifies the newly created object's data. Objects with // different object data will have different entity tags. The entity tag is // an opaque string. The entity tag may or may not be an MD5 digest of the object // data. If the entity tag is not an MD5 digest of the object data, it will // contain one or more nonhexadecimal characters and/or will consist of less - // than 32 or more than 32 hexadecimal digits. + // than 32 or more than 32 hexadecimal digits. For more information about how + // the entity tag is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The object key of the newly created object. @@ -12411,14 +13074,18 @@ type CompleteMultipartUploadOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS customer master key (CMK) in - // your initiate multipart upload request, the response includes this header. - // It confirms the encryption algorithm that Amazon S3 used to encrypt the object. + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created object, in case the bucket has versioning @@ -12426,12 +13093,20 @@ type CompleteMultipartUploadOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadOutput) GoString() string { return s.String() } @@ -12455,6 +13130,30 @@ func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMul return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32C(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA1(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA256(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { s.ETag = &v @@ -12508,15 +13207,26 @@ type CompletedMultipartUpload struct { _ struct{} `type:"structure"` // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back + // an HTTP 400 response. Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedMultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedMultipartUpload) GoString() string { return s.String() } @@ -12531,6 +13241,38 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip type CompletedPart struct { _ struct{} `type:"structure"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + // Entity tag returned when the part was uploaded. ETag *string `type:"string"` @@ -12539,16 +13281,48 @@ type CompletedPart struct { PartNumber *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedPart) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedPart) GoString() string { return s.String() } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompletedPart) SetChecksumCRC32(v string) *CompletedPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompletedPart) SetChecksumCRC32C(v string) *CompletedPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompletedPart) SetChecksumSHA1(v string) *CompletedPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompletedPart) SetChecksumSHA256(v string) *CompletedPart { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *CompletedPart) SetETag(v string) *CompletedPart { s.ETag = &v @@ -12589,12 +13363,20 @@ type Condition struct { KeyPrefixEquals *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
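A minimal sketch of carrying the new per-part checksum values through to completion. It assumes the parts were uploaded with a SHA-256 checksum algorithm so each UploadPart response carried a ChecksumSHA256 value; the bucket, key, upload ID, ETags, and checksums are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// ETag and ChecksumSHA256 would normally be copied from each
	// UploadPart response; placeholders are used here.
	parts := []*s3.CompletedPart{
		{
			PartNumber:     aws.Int64(1),
			ETag:           aws.String("\"etag-part-1\""),
			ChecksumSHA256: aws.String("base64-sha256-part-1"),
		},
		{
			PartNumber:     aws.Int64(2),
			ETag:           aws.String("\"etag-part-2\""),
			ChecksumSHA256: aws.String("base64-sha256-part-2"),
		},
	}

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String("example-bucket"),
		Key:             aws.String("example-key"),
		UploadId:        aws.String("example-upload-id"),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	if err != nil {
		log.Fatal(err)
	}

	// For multipart uploads the returned value is a checksum of the part
	// checksums, not a digest of the whole object (see the linked guide).
	fmt.Println(aws.StringValue(out.ChecksumSHA256))
}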
func (s Condition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Condition) GoString() string { return s.String() } @@ -12615,12 +13397,20 @@ type ContinuationEvent struct { _ struct{} `locationName:"ContinuationEvent" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ContinuationEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ContinuationEvent) GoString() string { return s.String() } @@ -12664,9 +13454,9 @@ type CopyObjectInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12684,6 +13474,11 @@ type CopyObjectInput struct { // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // Indicates the algorithm you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -12706,7 +13501,7 @@ type CopyObjectInput struct { // the source bucket and the key of the source object, separated by a slash // (/). For example, to copy the object reports/january.pdf from the bucket // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value - // must be URL encoded. + // must be URL-encoded. 
// // * For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the @@ -12722,7 +13517,7 @@ type CopyObjectInput struct { // For example, to copy the object reports/january.pdf through outpost my-outpost // owned by account 123456789012 in Region us-west-2, use the URL encoding // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - // The value must be URL encoded. + // The value must be URL-encoded. // // To copy a specific version of an object, append ?versionId= to // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). @@ -12752,6 +13547,10 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -12760,13 +13559,13 @@ type CopyObjectInput struct { CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request will fail with an HTTP - // 403 (Access Denied) error. + // bucket is owned by a different account, the request fails with the HTTP status + // code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request will fail with an HTTP 403 (Access - // Denied) error. + // is owned by a different account, the request fails with the HTTP status code + // 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. @@ -12804,7 +13603,7 @@ type CopyObjectInput struct { // with metadata provided in the request. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - // Specifies whether you want to apply a Legal Hold to the copied object. + // Specifies whether you want to apply a legal hold to the copied object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to the copied object. @@ -12815,8 +13614,8 @@ type CopyObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -12829,6 +13628,10 @@ type CopyObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -12839,6 +13642,10 @@ type CopyObjectInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // Specifies the Amazon Web Services KMS key ID to use for object encryption. @@ -12848,6 +13655,10 @@ type CopyObjectInput struct { // Web Services CLI, see Specifying the Signature Version in Request Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -12877,12 +13688,20 @@ type CopyObjectInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CopyObjectInput) GoString() string { return s.String() } @@ -12943,6 +13762,12 @@ func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CopyObjectInput) SetChecksumAlgorithm(v string) *CopyObjectInput { + s.ChecksumAlgorithm = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { s.ContentDisposition = &v @@ -13233,11 +14058,19 @@ type CopyObjectOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -13248,12 +14081,20 @@ type CopyObjectOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectOutput) GoString() string { return s.String() } @@ -13328,6 +14169,38 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { type CopyObjectResult struct { _ struct{} `type:"structure"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. ETag *string `type:"string"` @@ -13336,16 +14209,48 @@ type CopyObjectResult struct { LastModified *time.Time `type:"timestamp"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectResult) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectResult) GoString() string { return s.String() } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyObjectResult) SetChecksumCRC32(v string) *CopyObjectResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyObjectResult) SetChecksumCRC32C(v string) *CopyObjectResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyObjectResult) SetChecksumSHA1(v string) *CopyObjectResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyObjectResult) SetChecksumSHA256(v string) *CopyObjectResult { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { s.ETag = &v @@ -13362,6 +14267,38 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { type CopyPartResult struct { _ struct{} `type:"structure"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. 
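A minimal sketch of requesting a checksum for the copy destination through the new ChecksumAlgorithm field and reading it back from CopyObjectResult. The bucket and key names are placeholders, and the ChecksumAlgorithmSha256 constant is assumed to be the SDK enum value for this field:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String("destination-bucket"),
		Key:               aws.String("reports/january.pdf"),
		CopySource:        aws.String("source-bucket/reports/january.pdf"),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// CopyObjectResult carries the checksum computed for the new copy.
	if out.CopyObjectResult != nil {
		fmt.Println(aws.StringValue(out.CopyObjectResult.ChecksumSHA256))
	}
}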
With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + // Entity tag of the object. ETag *string `type:"string"` @@ -13369,16 +14306,48 @@ type CopyPartResult struct { LastModified *time.Time `type:"timestamp"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyPartResult) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyPartResult) GoString() string { return s.String() } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyPartResult) SetChecksumCRC32(v string) *CopyPartResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyPartResult) SetChecksumCRC32C(v string) *CopyPartResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyPartResult) SetChecksumSHA1(v string) *CopyPartResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyPartResult) SetChecksumSHA256(v string) *CopyPartResult { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. 
func (s *CopyPartResult) SetETag(v string) *CopyPartResult { s.ETag = &v @@ -13400,12 +14369,20 @@ type CreateBucketConfiguration struct { LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketConfiguration) GoString() string { return s.String() } @@ -13451,14 +14428,38 @@ type CreateBucketInput struct { // Specifies whether you want S3 Object Lock to be enabled for the new bucket. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control + // canned ACL or an equivalent form of this ACL expressed in the XML format. + ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketInput) GoString() string { return s.String() } @@ -13540,21 +14541,33 @@ func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketI return s } +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *CreateBucketInput) SetObjectOwnership(v string) *CreateBucketInput { + s.ObjectOwnership = &v + return s +} + type CreateBucketOutput struct { _ struct{} `type:"structure"` - // Specifies the Region where the bucket will be created. If you are creating - // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need - // to specify the location. + // A forward slash followed by the name of the bucket. 
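// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): the new
// ObjectOwnership member on CreateBucketInput accepts the values listed in the
// field docs above ("BucketOwnerPreferred", "ObjectWriter",
// "BucketOwnerEnforced"). The bucket name is hypothetical.
package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func createBucketWithOwnership() {
	svc := s3.New(session.Must(session.NewSession()))

	// BucketOwnerEnforced disables ACLs; the bucket owner owns every object.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket:          aws.String("example-bucket"), // hypothetical
		ObjectOwnership: aws.String("BucketOwnerEnforced"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
// ---------------------------------------------------------------------------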
Location *string `location:"header" locationName:"Location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketOutput) GoString() string { return s.String() } @@ -13585,9 +14598,9 @@ type CreateMultipartUploadInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -13605,6 +14618,11 @@ type CreateMultipartUploadInput struct { // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // Indicates the algorithm you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -13620,8 +14638,8 @@ type CreateMultipartUploadInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. @@ -13655,7 +14673,7 @@ type CreateMultipartUploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // Specifies whether you want to apply a Legal Hold to the uploaded object. + // Specifies whether you want to apply a legal hold to the uploaded object. 
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // Specifies the Object Lock mode that you want to apply to the uploaded object. @@ -13666,8 +14684,8 @@ type CreateMultipartUploadInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -13680,6 +14698,10 @@ type CreateMultipartUploadInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -13690,15 +14712,23 @@ type CreateMultipartUploadInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the ID of the symmetric customer managed Amazon Web Services KMS - // CMK to use for object encryption. All GET and PUT requests for an object - // protected by Amazon Web Services KMS will fail if not made via SSL or using - // SigV4. For information about configuring using any of the officially supported - // Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the - // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the ID of the symmetric customer managed key to use for object + // encryption. All GET and PUT requests for an object protected by Amazon Web + // Services KMS will fail if not made via SSL or using SigV4. For information + // about configuring using any of the officially supported Amazon Web Services + // SDKs and Amazon Web Services CLI, see Specifying the Signature Version in + // Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -13722,12 +14752,20 @@ type CreateMultipartUploadInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadInput) GoString() string { return s.String() } @@ -13785,6 +14823,12 @@ func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartU return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetChecksumAlgorithm(v string) *CreateMultipartUploadInput { + s.ChecksumAlgorithm = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { s.ContentDisposition = &v @@ -14001,9 +15045,9 @@ type CreateMultipartUploadOutput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `locationName:"Bucket" type:"string"` @@ -14011,6 +15055,9 @@ type CreateMultipartUploadOutput struct { // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // Object key for which the multipart upload was initiated. Key *string `min:"1" type:"string"` @@ -14031,11 +15078,19 @@ type CreateMultipartUploadOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. 
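// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): starting
// a multipart upload with the new ChecksumAlgorithm member and an SSE-KMS key.
// "SHA256" is one of the documented checksum algorithm values; the bucket,
// key, and KMS key ID are hypothetical. Because SSEKMSKeyId is tagged
// sensitive, String()/GoString() on the input redact its value.
package example

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func startChecksummedUpload() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),     // hypothetical
		Key:                  aws.String("large-object"),       // hypothetical
		ChecksumAlgorithm:    aws.String("SHA256"),
		ServerSideEncryption: aws.String("aws:kms"),
		SSEKMSKeyId:          aws.String("example-kms-key-id"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}

	// Parts uploaded under this upload ID are then expected to carry a
	// matching SHA-256 part checksum.
	fmt.Println("upload id:", *out.UploadId)
}
// ---------------------------------------------------------------------------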
+ // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -14046,12 +15101,20 @@ type CreateMultipartUploadOutput struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadOutput) GoString() string { return s.String() } @@ -14087,6 +15150,12 @@ func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipa return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetChecksumAlgorithm(v string) *CreateMultipartUploadOutput { + s.ChecksumAlgorithm = &v + return s +} + // SetKey sets the Key field's value. func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { s.Key = &v @@ -14158,12 +15227,20 @@ type DefaultRetention struct { Years *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DefaultRetention) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DefaultRetention) GoString() string { return s.String() } @@ -14200,12 +15277,20 @@ type Delete struct { Quiet *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s Delete) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Delete) GoString() string { return s.String() } @@ -14254,8 +15339,8 @@ type DeleteBucketAnalyticsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -14264,12 +15349,20 @@ type DeleteBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -14349,12 +15442,20 @@ type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -14368,17 +15469,25 @@ type DeleteBucketCorsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsInput) GoString() string { return s.String() } @@ -14449,12 +15558,20 @@ type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsOutput) GoString() string { return s.String() } @@ -14469,17 +15586,25 @@ type DeleteBucketEncryptionInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionInput) GoString() string { return s.String() } @@ -14550,12 +15675,20 @@ type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
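// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): the
// ExpectedBucketOwner member on these DeleteBucket* inputs acts as a guard; if
// the bucket belongs to a different account the request fails with HTTP 403
// Forbidden, as the updated field docs say. The account ID and bucket name are
// hypothetical.
package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func deleteCorsGuarded() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
		Bucket:              aws.String("example-bucket"), // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),   // hypothetical account ID
	})
	if err != nil {
		// A mismatched owner surfaces here as a 403 from S3.
		log.Fatal(err)
	}
}
// ---------------------------------------------------------------------------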
func (s DeleteBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionOutput) GoString() string { return s.String() } @@ -14569,17 +15702,25 @@ type DeleteBucketInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInput) GoString() string { return s.String() } @@ -14661,12 +15802,20 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -14740,12 +15889,20 @@ type DeleteBucketIntelligentTieringConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -14759,8 +15916,8 @@ type DeleteBucketInventoryConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -14769,12 +15926,20 @@ type DeleteBucketInventoryConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -14854,12 +16019,20 @@ type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -14873,17 +16046,25 @@ type DeleteBucketLifecycleInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleInput) GoString() string { return s.String() } @@ -14954,12 +16135,20 @@ type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } @@ -14973,8 +16162,8 @@ type DeleteBucketMetricsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. @@ -14983,12 +16172,20 @@ type DeleteBucketMetricsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -15068,12 +16265,20 @@ type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -15082,12 +16287,20 @@ type DeleteBucketOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOutput) GoString() string { return s.String() } @@ -15101,17 +16314,25 @@ type DeleteBucketOwnershipControlsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -15182,12 +16403,20 @@ type DeleteBucketOwnershipControlsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -15201,17 +16430,25 @@ type DeleteBucketPolicyInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyInput) GoString() string { return s.String() } @@ -15282,12 +16519,20 @@ type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } @@ -15301,17 +16546,25 @@ type DeleteBucketReplicationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationInput) GoString() string { return s.String() } @@ -15382,12 +16635,20 @@ type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } @@ -15401,17 +16662,25 @@ type DeleteBucketTaggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingInput) GoString() string { return s.String() } @@ -15482,12 +16751,20 @@ type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } @@ -15501,17 +16778,25 @@ type DeleteBucketWebsiteInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteInput) GoString() string { return s.String() } @@ -15582,12 +16867,20 @@ type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } @@ -15613,12 +16906,20 @@ type DeleteMarkerEntry struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerEntry) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerEntry) GoString() string { return s.String() } @@ -15675,12 +16976,20 @@ type DeleteMarkerReplication struct { Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerReplication) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerReplication) GoString() string { return s.String() } @@ -15706,21 +17015,22 @@ type DeleteObjectInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Indicates whether S3 Object Lock should bypass Governance-mode restrictions - // to process this operation. + // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention + // permission. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Key name of the object to delete. @@ -15736,8 +17046,8 @@ type DeleteObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -15745,12 +17055,20 @@ type DeleteObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectInput) GoString() string { return s.String() } @@ -15869,12 +17187,20 @@ type DeleteObjectOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
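// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored diff): deleting
// a specific object version while bypassing a Governance-mode Object Lock, per
// the BypassGovernanceRetention docs above (the caller needs the
// s3:BypassGovernanceRetention permission). Names and the version ID are
// hypothetical.
package example

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func deleteLockedVersion() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:                    aws.String("example-bucket"),     // hypothetical
		Key:                       aws.String("retained-object"),    // hypothetical
		VersionId:                 aws.String("example-version-id"), // hypothetical
		BypassGovernanceRetention: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.VersionId != nil {
		fmt.Println("deleted version:", *out.VersionId)
	}
}
// ---------------------------------------------------------------------------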
func (s DeleteObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectOutput) GoString() string { return s.String() } @@ -15912,17 +17238,17 @@ type DeleteObjectTaggingInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key that identifies the object in the bucket from which to remove all @@ -15935,12 +17261,20 @@ type DeleteObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingInput) GoString() string { return s.String() } @@ -16032,12 +17366,20 @@ type DeleteObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingOutput) GoString() string { return s.String() } @@ -16063,27 +17405,51 @@ type DeleteObjectsInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // Object Lock in place. You must have sufficient permissions to perform this - // operation. + // Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention + // permission. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // This checksum algorithm must be the same for all parts and it match the checksum + // value supplied in the CreateMultipartUpload request. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // Container for the request. // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, @@ -16094,18 +17460,26 @@ type DeleteObjectsInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsInput) GoString() string { return s.String() } @@ -16153,6 +17527,12 @@ func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjects return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *DeleteObjectsInput) SetChecksumAlgorithm(v string) *DeleteObjectsInput { + s.ChecksumAlgorithm = &v + return s +} + // SetDelete sets the Delete field's value. func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -16220,12 +17600,20 @@ type DeleteObjectsOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsOutput) GoString() string { return s.String() } @@ -16257,17 +17645,25 @@ type DeletePublicAccessBlockInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockInput) GoString() string { return s.String() } @@ -16338,12 +17734,20 @@ type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } @@ -16369,12 +17773,20 @@ type DeletedObject struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletedObject) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletedObject) GoString() string { return s.String() } @@ -16453,12 +17865,20 @@ type Destination struct { StorageClass *string `type:"string" enum:"StorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Destination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Destination) GoString() string { return s.String() } @@ -16555,19 +17975,31 @@ type Encryption struct { KMSContext *string `type:"string"` // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed Amazon Web Services KMS CMK to use for encryption - // of job results. Amazon S3 only supports symmetric CMKs. For more information, - // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // the symmetric customer managed key to use for encryption of job results. + // Amazon S3 only supports symmetric keys. For more information, see Using symmetric + // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Encryption's + // String and GoString methods. KMSKeyId *string `type:"string" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Encryption) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Encryption) GoString() string { return s.String() } @@ -16617,12 +18049,20 @@ type EncryptionConfiguration struct { ReplicaKmsKeyID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EncryptionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EncryptionConfiguration) GoString() string { return s.String() } @@ -16640,12 +18080,20 @@ type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EndEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s EndEvent) GoString() string { return s.String() } @@ -17041,12 +18489,20 @@ type Error struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Error) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Error) GoString() string { return s.String() } @@ -17089,12 +18545,20 @@ type ErrorDocument struct { Key *string `min:"1" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ErrorDocument) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ErrorDocument) GoString() string { return s.String() } @@ -17121,6 +18585,29 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) GoString() string { + return s.String() +} + // Optional configuration to replicate existing source bucket objects. For more // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) // in the Amazon S3 User Guide. @@ -17131,12 +18618,20 @@ type ExistingObjectReplication struct { Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ExistingObjectReplication) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ExistingObjectReplication) GoString() string { return s.String() } @@ -17176,12 +18671,20 @@ type FilterRule struct { Value *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FilterRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FilterRule) GoString() string { return s.String() } @@ -17207,17 +18710,25 @@ type GetBucketAccelerateConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationInput) GoString() string { return s.String() } @@ -17291,12 +18802,20 @@ type GetBucketAccelerateConfigurationOutput struct { Status *string `type:"string" enum:"BucketAccelerateStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationOutput) GoString() string { return s.String() } @@ -17316,17 +18835,25 @@ type GetBucketAclInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclInput) GoString() string { return s.String() } @@ -17403,12 +18930,20 @@ type GetBucketAclOutput struct { Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclOutput) GoString() string { return s.String() } @@ -17434,8 +18969,8 @@ type GetBucketAnalyticsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -17444,12 +18979,20 @@ type GetBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -17532,12 +19075,20 @@ type GetBucketAnalyticsConfigurationOutput struct { AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -17557,17 +19108,25 @@ type GetBucketCorsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsInput) GoString() string { return s.String() } @@ -17642,12 +19201,20 @@ type GetBucketCorsOutput struct { CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsOutput) GoString() string { return s.String() } @@ -17668,17 +19235,25 @@ type GetBucketEncryptionInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. 
+ // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionInput) GoString() string { return s.String() } @@ -17752,12 +19327,20 @@ type GetBucketEncryptionOutput struct { ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionOutput) GoString() string { return s.String() } @@ -17783,12 +19366,20 @@ type GetBucketIntelligentTieringConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -17865,12 +19456,20 @@ type GetBucketIntelligentTieringConfigurationOutput struct { IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -17890,8 +19489,8 @@ type GetBucketInventoryConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -17900,12 +19499,20 @@ type GetBucketInventoryConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -17988,12 +19595,20 @@ type GetBucketInventoryConfigurationOutput struct { InventoryConfiguration *InventoryConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -18013,17 +19628,25 @@ type GetBucketLifecycleConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationInput) GoString() string { return s.String() } @@ -18097,12 +19720,20 @@ type GetBucketLifecycleConfigurationOutput struct { Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationOutput) GoString() string { return s.String() } @@ -18122,17 +19753,25 @@ type GetBucketLifecycleInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleInput) GoString() string { return s.String() } @@ -18206,12 +19845,20 @@ type GetBucketLifecycleOutput struct { Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleOutput) GoString() string { return s.String() } @@ -18231,17 +19878,25 @@ type GetBucketLocationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationInput) GoString() string { return s.String() } @@ -18317,12 +19972,20 @@ type GetBucketLocationOutput struct { LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationOutput) GoString() string { return s.String() } @@ -18342,17 +20005,25 @@ type GetBucketLoggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketLoggingInput) GoString() string { return s.String() } @@ -18429,12 +20100,20 @@ type GetBucketLoggingOutput struct { LoggingEnabled *LoggingEnabled `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingOutput) GoString() string { return s.String() } @@ -18454,8 +20133,8 @@ type GetBucketMetricsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. @@ -18464,12 +20143,20 @@ type GetBucketMetricsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -18552,12 +20239,20 @@ type GetBucketMetricsConfigurationOutput struct { MetricsConfiguration *MetricsConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -18577,17 +20272,25 @@ type GetBucketNotificationConfigurationRequest struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketNotificationConfigurationRequest) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketNotificationConfigurationRequest) GoString() string { return s.String() } @@ -18663,17 +20366,25 @@ type GetBucketOwnershipControlsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -18743,17 +20454,25 @@ func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface type GetBucketOwnershipControlsOutput struct { _ struct{} `type:"structure" payload:"OwnershipControls"` - // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in - // effect for this Amazon S3 bucket. + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // currently in effect for this Amazon S3 bucket. OwnershipControls *OwnershipControls `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -18773,17 +20492,25 @@ type GetBucketPolicyInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyInput) GoString() string { return s.String() } @@ -18857,12 +20584,20 @@ type GetBucketPolicyOutput struct { Policy *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyOutput) GoString() string { return s.String() } @@ -18882,17 +20617,25 @@ type GetBucketPolicyStatusInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketPolicyStatusInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusInput) GoString() string { return s.String() } @@ -18966,12 +20709,20 @@ type GetBucketPolicyStatusOutput struct { PolicyStatus *PolicyStatus `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusOutput) GoString() string { return s.String() } @@ -18991,17 +20742,25 @@ type GetBucketReplicationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationInput) GoString() string { return s.String() } @@ -19076,12 +20835,20 @@ type GetBucketReplicationOutput struct { ReplicationConfiguration *ReplicationConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketReplicationOutput) GoString() string { return s.String() } @@ -19101,17 +20868,25 @@ type GetBucketRequestPaymentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentInput) GoString() string { return s.String() } @@ -19185,12 +20960,20 @@ type GetBucketRequestPaymentOutput struct { Payer *string `type:"string" enum:"Payer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentOutput) GoString() string { return s.String() } @@ -19210,17 +20993,25 @@ type GetBucketTaggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketTaggingInput) GoString() string { return s.String() } @@ -19296,12 +21087,20 @@ type GetBucketTaggingOutput struct { TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingOutput) GoString() string { return s.String() } @@ -19321,17 +21120,25 @@ type GetBucketVersioningInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningInput) GoString() string { return s.String() } @@ -19410,12 +21217,20 @@ type GetBucketVersioningOutput struct { Status *string `type:"string" enum:"BucketVersioningStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningOutput) GoString() string { return s.String() } @@ -19441,17 +21256,25 @@ type GetBucketWebsiteInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteInput) GoString() string { return s.String() } @@ -19535,12 +21358,20 @@ type GetBucketWebsiteOutput struct { RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteOutput) GoString() string { return s.String() } @@ -19585,8 +21416,8 @@ type GetObjectAclInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key of the object for which to get the ACL information. @@ -19596,8 +21427,8 @@ type GetObjectAclInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -19605,12 +21436,20 @@ type GetObjectAclInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetObjectAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclInput) GoString() string { return s.String() } @@ -19715,12 +21554,20 @@ type GetObjectAclOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclOutput) GoString() string { return s.String() } @@ -19743,6 +21590,423 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { return s } +type GetObjectAttributesInput struct { + _ struct{} `locationName:"GetObjectAttributesRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this action with S3 on Outposts through the Amazon Web Services SDKs, + // you provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. 
+ MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` + + // An XML header that specifies the fields at the root level that you want returned + // in the response. Fields that you do not specify are not returned. + // + // ObjectAttributes is a required field + ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectAttributesInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // The version ID used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
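The redaction behaviour described in the String and GoString comments above can be seen directly: any member tagged sensitive:"true", such as SSECustomerKey, keeps its name in the printed output but has its value replaced. A minimal sketch, assuming the vendored github.com/aws/aws-sdk-go/service/s3 package and hypothetical bucket/key names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build a GetObjectAttributesInput carrying a customer-provided encryption key,
	// using the setters defined in this hunk.
	input := &s3.GetObjectAttributesInput{}
	input.SetBucket("example-bucket")
	input.SetKey("example-key")
	input.SetSSECustomerAlgorithm("AES256")
	input.SetSSECustomerKey("0123456789abcdef0123456789abcdef")

	// SSECustomerKey is tagged sensitive:"true", so the generated String()
	// prints the member name but shows "sensitive" in place of the value.
	fmt.Println(input.String())
}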
+func (s *GetObjectAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAttributesInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.ObjectAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectAttributes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAttributesInput) SetBucket(v string) *GetObjectAttributesInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAttributesInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAttributesInput) SetExpectedBucketOwner(v string) *GetObjectAttributesInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAttributesInput) SetKey(v string) *GetObjectAttributesInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesInput) SetMaxParts(v int64) *GetObjectAttributesInput { + s.MaxParts = &v + return s +} + +// SetObjectAttributes sets the ObjectAttributes field's value. +func (s *GetObjectAttributesInput) SetObjectAttributes(v []*string) *GetObjectAttributesInput { + s.ObjectAttributes = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesInput) SetPartNumberMarker(v int64) *GetObjectAttributesInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAttributesInput) SetRequestPayer(v string) *GetObjectAttributesInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectAttributesInput) SetSSECustomerAlgorithm(v string) *GetObjectAttributesInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKey(v string) *GetObjectAttributesInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectAttributesInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKeyMD5(v string) *GetObjectAttributesInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesInput) SetVersionId(v string) *GetObjectAttributesInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAttributesInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAttributesInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAttributesInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAttributesOutput struct { + _ struct{} `type:"structure"` + + // The checksum or digest of the object. + Checksum *Checksum `type:"structure"` + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `type:"string"` + + // The creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A collection of parts associated with a multipart upload. + ObjectParts *GetObjectAttributesParts `type:"structure"` + + // The size of the object in bytes. + ObjectSize *int64 `type:"long"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `type:"string" enum:"StorageClass"` + + // The version ID of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *GetObjectAttributesOutput) SetChecksum(v *Checksum) *GetObjectAttributesOutput { + s.Checksum = v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectAttributesOutput) SetDeleteMarker(v bool) *GetObjectAttributesOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectAttributesOutput) SetETag(v string) *GetObjectAttributesOutput { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectAttributesOutput) SetLastModified(v time.Time) *GetObjectAttributesOutput { + s.LastModified = &v + return s +} + +// SetObjectParts sets the ObjectParts field's value. +func (s *GetObjectAttributesOutput) SetObjectParts(v *GetObjectAttributesParts) *GetObjectAttributesOutput { + s.ObjectParts = v + return s +} + +// SetObjectSize sets the ObjectSize field's value. 
+func (s *GetObjectAttributesOutput) SetObjectSize(v int64) *GetObjectAttributesOutput { + s.ObjectSize = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAttributesOutput) SetRequestCharged(v string) *GetObjectAttributesOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectAttributesOutput) SetStorageClass(v string) *GetObjectAttributesOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesOutput) SetVersionId(v string) *GetObjectAttributesOutput { + s.VersionId = &v + return s +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + _ struct{} `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number + // of parts exceeds the limit returned in the MaxParts element. + IsTruncated *bool `type:"boolean"` + + // The maximum number of parts allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the PartNumberMarker request parameter in + // a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // The marker for the current part. + PartNumberMarker *int64 `type:"integer"` + + // A container for elements related to a particular part. A response can contain + // zero or more Parts elements. + Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"` + + // The total number of parts. + TotalPartsCount *int64 `locationName:"PartsCount" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesParts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesParts) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetObjectAttributesParts) SetIsTruncated(v bool) *GetObjectAttributesParts { + s.IsTruncated = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesParts) SetMaxParts(v int64) *GetObjectAttributesParts { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *GetObjectAttributesParts) SetNextPartNumberMarker(v int64) *GetObjectAttributesParts { + s.NextPartNumberMarker = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesParts) SetPartNumberMarker(v int64) *GetObjectAttributesParts { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *GetObjectAttributesParts) SetParts(v []*ObjectPart) *GetObjectAttributesParts { + s.Parts = v + return s +} + +// SetTotalPartsCount sets the TotalPartsCount field's value. 
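GetObjectAttributesInput and GetObjectAttributesOutput above form the request/response pair for the GetObjectAttributes operation this SDK update vendors in, with GetObjectAttributesParts carrying the paged multipart listing. A short sketch of one way to call it using the setters defined in this hunk; the session/client setup, the generated GetObjectAttributes client method, and the attribute name strings ("ETag", "ObjectSize") follow the SDK's usual conventions and are assumptions, not something shown in this diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// ObjectAttributes is required; it selects which root-level fields the
	// response carries. MaxParts/PartNumberMarker page through ObjectParts.
	input := &s3.GetObjectAttributesInput{}
	input.SetBucket("example-bucket") // hypothetical bucket and key
	input.SetKey("example-key")
	input.SetObjectAttributes([]*string{aws.String("ETag"), aws.String("ObjectSize")})
	input.SetMaxParts(100)

	out, err := svc.GetObjectAttributes(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ETag), aws.Int64Value(out.ObjectSize))
}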
+func (s *GetObjectAttributesParts) SetTotalPartsCount(v int64) *GetObjectAttributesParts { + s.TotalPartsCount = &v + return s +} + type GetObjectInput struct { _ struct{} `locationName:"GetObjectRequest" type:"structure"` @@ -19755,36 +22019,44 @@ type GetObjectInput struct { // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // + // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // To retrieve the checksum, this mode must be enabled. + // + // The AWS SDK for Go v1 does not support automatic response payload checksum + // validation. This feature is available in the AWS SDK for Go v2. + ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). + // Return the object only if its entity tag (ETag) is the same as the one specified; + // otherwise, return a 412 (precondition failed) error. IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). + // specified; otherwise, return a 304 (not modified) error. IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. 
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key of the object to get. @@ -19806,8 +22078,8 @@ type GetObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -19838,6 +22110,10 @@ type GetObjectInput struct { // must match the one used when storing the data. The key must be appropriate // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -19849,12 +22125,20 @@ type GetObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectInput) GoString() string { return s.String() } @@ -19894,6 +22178,12 @@ func (s *GetObjectInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumMode sets the ChecksumMode field's value. +func (s *GetObjectInput) SetChecksumMode(v string) *GetObjectInput { + s.ChecksumMode = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { s.ExpectedBucketOwner = &v @@ -20045,7 +22335,7 @@ func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { type GetObjectLegalHoldInput struct { _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - // The bucket name containing the object whose Legal Hold status you want to + // The bucket name containing the object whose legal hold status you want to // retrieve. // // When using this action with an access point, you must direct requests to @@ -20059,32 +22349,40 @@ type GetObjectLegalHoldInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The key name for the object whose Legal Hold status you want to retrieve. + // The key name for the object whose legal hold status you want to retrieve. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // The version ID of the object whose Legal Hold status you want to retrieve. + // The version ID of the object whose legal hold status you want to retrieve. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldInput) GoString() string { return s.String() } @@ -20178,16 +22476,24 @@ func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, erro type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` - // The current Legal Hold status for the specified object. + // The current legal hold status for the specified object. LegalHold *ObjectLockLegalHold `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
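The ChecksumMode field and SetChecksumMode setter added to GetObjectInput a little earlier in this hunk opt a GetObject request into returning the stored checksum, which then surfaces on GetObjectOutput through the ChecksumCRC32/ChecksumCRC32C/ChecksumSHA1/ChecksumSHA256 members added below. A hedged sketch under those assumptions; the "ENABLED" enum value and the client call follow the SDK's usual generated pattern and are not shown in this diff, and per the comment above the Go v1 SDK does not validate the payload against the returned checksum automatically:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	input := &s3.GetObjectInput{}
	input.SetBucket("example-bucket") // hypothetical names
	input.SetKey("example-key")
	input.SetChecksumMode("ENABLED") // assumed ChecksumMode enum value

	out, err := svc.GetObject(input)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	// Whatever checksum the object was uploaded with is echoed back in the
	// matching header; the v1 SDK does not verify the body against it.
	fmt.Println(aws.StringValue(out.ChecksumSHA256))
}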
func (s GetObjectLegalHoldOutput) GoString() string { return s.String() } @@ -20214,17 +22520,25 @@ type GetObjectLockConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationInput) GoString() string { return s.String() } @@ -20298,12 +22612,20 @@ type GetObjectLockConfigurationOutput struct { ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationOutput) GoString() string { return s.String() } @@ -20330,6 +22652,38 @@ type GetObjectOutput struct { // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -20354,14 +22708,13 @@ type GetObjectOutput struct { // Marker. If false, this response header does not appear in the response. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. + // An entity tag (ETag) is an opaque identifier assigned by a web server to + // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. + // providing object expiration information. The value of the rule-id is URL-encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. @@ -20393,7 +22746,8 @@ type GetObjectOutput struct { // The date and time when this object's Object Lock will expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // The count of parts this object has. + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` // Amazon S3 can return this if your request involves a bucket that is either @@ -20419,8 +22773,12 @@ type GetObjectOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. 
+ // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -20443,12 +22801,20 @@ type GetObjectOutput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectOutput) GoString() string { return s.String() } @@ -20477,6 +22843,30 @@ func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *GetObjectOutput) SetChecksumCRC32(v string) *GetObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *GetObjectOutput) SetChecksumCRC32C(v string) *GetObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *GetObjectOutput) SetChecksumSHA1(v string) *GetObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *GetObjectOutput) SetChecksumSHA256(v string) *GetObjectOutput { + s.ChecksumSHA256 = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { s.ContentDisposition = &v @@ -20662,8 +23052,8 @@ type GetObjectRetentionInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object whose retention settings you want to retrieve. @@ -20673,8 +23063,8 @@ type GetObjectRetentionInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -20682,12 +23072,20 @@ type GetObjectRetentionInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionInput) GoString() string { return s.String() } @@ -20785,12 +23183,20 @@ type GetObjectRetentionOutput struct { Retention *ObjectLockRetention `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionOutput) GoString() string { return s.String() } @@ -20816,17 +23222,17 @@ type GetObjectTaggingInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which to get the tagging information. @@ -20836,8 +23242,8 @@ type GetObjectTaggingInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -20845,12 +23251,20 @@ type GetObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingInput) GoString() string { return s.String() } @@ -20953,12 +23367,20 @@ type GetObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingOutput) GoString() string { return s.String() } @@ -20985,8 +23407,8 @@ type GetObjectTorrentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The object key for which to get the information. @@ -20996,18 +23418,26 @@ type GetObjectTorrentInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentInput) GoString() string { return s.String() } @@ -21103,12 +23533,20 @@ type GetObjectTorrentOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentOutput) GoString() string { return s.String() } @@ -21135,17 +23573,25 @@ type GetPublicAccessBlockInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetPublicAccessBlockInput) GoString() string { return s.String() } @@ -21220,12 +23666,20 @@ type GetPublicAccessBlockOutput struct { PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockOutput) GoString() string { return s.String() } @@ -21246,12 +23700,20 @@ type GlacierJobParameters struct { Tier *string `type:"string" required:"true" enum:"Tier"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GlacierJobParameters) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GlacierJobParameters) GoString() string { return s.String() } @@ -21286,12 +23748,20 @@ type Grant struct { Permission *string `type:"string" enum:"Permission"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grant) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grant) GoString() string { return s.String() } @@ -21368,12 +23838,20 @@ type Grantee struct { URI *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grantee) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Grantee) GoString() string { return s.String() } @@ -21436,26 +23914,34 @@ type HeadBucketInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketInput) GoString() string { return s.String() } @@ -21526,12 +24012,20 @@ type HeadBucketOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketOutput) GoString() string { return s.String() } @@ -21551,33 +24045,40 @@ type HeadObjectInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // To retrieve the checksum, this parameter must be enabled. + // + // In addition, if you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you + // must have permission to use the kms:Decrypt action for the request to succeed. + ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). + // Return the object only if its entity tag (ETag) is the same as the one specified; + // otherwise, return a 412 (precondition failed) error. IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). + // specified; otherwise, return a 304 (not modified) error. IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // The object key. @@ -21591,17 +24092,14 @@ type HeadObjectInput struct { // object. PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). - // - // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // Because HeadObject returns only the metadata for an object, this parameter + // has no effect. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -21614,6 +24112,10 @@ type HeadObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -21625,12 +24127,20 @@ type HeadObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectInput) GoString() string { return s.String() } @@ -21670,6 +24180,12 @@ func (s *HeadObjectInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumMode sets the ChecksumMode field's value. +func (s *HeadObjectInput) SetChecksumMode(v string) *HeadObjectInput { + s.ChecksumMode = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { s.ExpectedBucketOwner = &v @@ -21798,6 +24314,38 @@ type HeadObjectOutput struct { // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -21819,14 +24367,13 @@ type HeadObjectOutput struct { // Marker. If false, this response header does not appear in the response. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. + // An entity tag (ETag) is an opaque identifier assigned by a web server to + // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. + // providing object expiration information. The value of the rule-id is URL-encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. @@ -21864,7 +24411,8 @@ type HeadObjectOutput struct { // is only returned if the requester has the s3:GetObjectRetention permission. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // The count of parts this object has. + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. 
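As a quick, hedged illustration of the new ChecksumMode request parameter and the x-amz-checksum-* response members added in this hunk (bucket and key names below are placeholders, not taken from this diff):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials and region are assumed to come from the environment.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// ChecksumMode must be enabled for the checksum headers to be returned, and
	// kms:Decrypt permission is needed if the object is encrypted with SSE-KMS.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:       aws.String("example-bucket"), // placeholder
		Key:          aws.String("example-key"),    // placeholder
		ChecksumMode: aws.String(s3.ChecksumModeEnabled),
	})
	if err != nil {
		fmt.Println("HeadObject failed:", err)
		return
	}

	// Each checksum is present only if the object was uploaded with that algorithm;
	// for multipart uploads it may not be a checksum of the full object.
	fmt.Println("CRC32:  ", aws.StringValue(out.ChecksumCRC32))
	fmt.Println("SHA256: ", aws.StringValue(out.ChecksumSHA256))
}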
PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` // Amazon S3 can return this header if your request involves a bucket that is @@ -21876,7 +24424,7 @@ type HeadObjectOutput struct { // these buckets, Amazon S3 will return the x-amz-replication-status header // in the response as follows: // - // * If requesting an object from the source bucket — Amazon S3 will return + // * If requesting an object from the source bucket, Amazon S3 will return // the x-amz-replication-status header if the object in your request is eligible // for replication. For example, suppose that in your replication configuration, // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects @@ -21886,12 +24434,12 @@ type HeadObjectOutput struct { // header with value PENDING, COMPLETED or FAILED indicating object replication // status. // - // * If requesting an object from a destination bucket — Amazon S3 will - // return the x-amz-replication-status header with value REPLICA if the object - // in your request is a replica that Amazon S3 created and there is no replica + // * If requesting an object from a destination bucket, Amazon S3 will return + // the x-amz-replication-status header with value REPLICA if the object in + // your request is a replica that Amazon S3 created and there is no replica // modification replication in progress. // - // * When replicating objects to multiple destination buckets the x-amz-replication-status + // * When replicating objects to multiple destination buckets, the x-amz-replication-status // header acts differently. The header of the source object will only return // a value of COMPLETED when replication is successful to all destinations. // The header will remain at value PENDING until replication has completed @@ -21933,15 +24481,18 @@ type HeadObjectOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If the object is stored using server-side encryption either with an Amazon - // Web Services KMS customer master key (CMK) or an Amazon S3-managed encryption - // key, the response includes this header with the value of the server-side - // encryption algorithm used when storing this object in Amazon S3 (for example, - // AES256, aws:kms). + // Web Services KMS key or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. 
Amazon S3 returns this @@ -21959,12 +24510,20 @@ type HeadObjectOutput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectOutput) GoString() string { return s.String() } @@ -21993,6 +24552,30 @@ func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *HeadObjectOutput) SetChecksumCRC32(v string) *HeadObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *HeadObjectOutput) SetChecksumCRC32C(v string) *HeadObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *HeadObjectOutput) SetChecksumSHA1(v string) *HeadObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *HeadObjectOutput) SetChecksumSHA256(v string) *HeadObjectOutput { + s.ChecksumSHA256 = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { s.ContentDisposition = &v @@ -22167,12 +24750,20 @@ type IndexDocument struct { Suffix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IndexDocument) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IndexDocument) GoString() string { return s.String() } @@ -22208,12 +24799,20 @@ type Initiator struct { ID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Initiator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
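Most hunks in this file carry the same String/GoString documentation update about "sensitive" members. A minimal sketch of what that means in practice, using an invented SSE-C key value on HeadObjectInput:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.HeadObjectInput{
		Bucket:         aws.String("example-bucket"),           // placeholder
		Key:            aws.String("example-key"),              // placeholder
		SSECustomerKey: aws.String("base64-encoded-key-bytes"), // placeholder; tagged sensitive:"true"
	}

	// The generated String()/GoString() methods keep the SSECustomerKey member name
	// in the output but do not print the key material itself, per the doc comments above.
	fmt.Println(in.String())
}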
func (s Initiator) GoString() string { return s.String() } @@ -22248,12 +24847,20 @@ type InputSerialization struct { Parquet *ParquetInput `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InputSerialization) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InputSerialization) GoString() string { return s.String() } @@ -22296,12 +24903,20 @@ type IntelligentTieringAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringAndOperator) GoString() string { return s.String() } @@ -22366,12 +24981,20 @@ type IntelligentTieringConfiguration struct { Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringConfiguration) GoString() string { return s.String() } @@ -22456,12 +25079,20 @@ type IntelligentTieringFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s IntelligentTieringFilter) GoString() string { return s.String() } @@ -22539,7 +25170,7 @@ type InventoryConfiguration struct { IsEnabled *bool `type:"boolean" required:"true"` // Contains the optional fields that are included in the inventory results. - OptionalFields []*string `locationNameList:"Field" type:"list"` + OptionalFields []*string `locationNameList:"Field" type:"list" enum:"InventoryOptionalField"` // Specifies the schedule for generating inventory results. // @@ -22547,12 +25178,20 @@ type InventoryConfiguration struct { Schedule *InventorySchedule `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryConfiguration) GoString() string { return s.String() } @@ -22650,12 +25289,20 @@ type InventoryDestination struct { S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryDestination) GoString() string { return s.String() } @@ -22696,12 +25343,20 @@ type InventoryEncryption struct { SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryEncryption) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryEncryption) GoString() string { return s.String() } @@ -22744,12 +25399,20 @@ type InventoryFilter struct { Prefix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s InventoryFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryFilter) GoString() string { return s.String() } @@ -22804,12 +25467,20 @@ type InventoryS3BucketDestination struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryS3BucketDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryS3BucketDestination) GoString() string { return s.String() } @@ -22882,12 +25553,20 @@ type InventorySchedule struct { Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventorySchedule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventorySchedule) GoString() string { return s.String() } @@ -22919,12 +25598,20 @@ type JSONInput struct { Type *string `type:"string" enum:"JSONType"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONInput) GoString() string { return s.String() } @@ -22944,12 +25631,20 @@ type JSONOutput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s JSONOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONOutput) GoString() string { return s.String() } @@ -22969,12 +25664,20 @@ type KeyFilter struct { FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s KeyFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s KeyFilter) GoString() string { return s.String() } @@ -22994,7 +25697,7 @@ type LambdaFunctionConfiguration struct { // in the Amazon S3 User Guide. // // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) @@ -23012,12 +25715,20 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LambdaFunctionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LambdaFunctionConfiguration) GoString() string { return s.String() } @@ -23072,12 +25783,20 @@ type LifecycleConfiguration struct { Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s LifecycleConfiguration) GoString() string { return s.String() } @@ -23130,12 +25849,20 @@ type LifecycleExpiration struct { ExpiredObjectDeleteMarker *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleExpiration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleExpiration) GoString() string { return s.String() } @@ -23175,7 +25902,7 @@ type LifecycleRule struct { // The Filter is used to identify objects that a Lifecycle Rule applies to. // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is - // required if the LifecycleRule does not containt a Prefix element. + // required if the LifecycleRule does not contain a Prefix element. Filter *LifecycleRuleFilter `type:"structure"` // Unique identifier for the rule. The value cannot be longer than 255 characters. @@ -23215,12 +25942,20 @@ type LifecycleRule struct { Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRule) GoString() string { return s.String() } @@ -23303,6 +26038,12 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + // Prefix identifying one or more objects to which the rule applies. Prefix *string `type:"string"` @@ -23311,12 +26052,20 @@ type LifecycleRuleAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleAndOperator) GoString() string { return s.String() } @@ -23341,6 +26090,18 @@ func (s *LifecycleRuleAndOperator) Validate() error { return nil } +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeGreaterThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeLessThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeLessThan = &v + return s +} + // SetPrefix sets the Prefix field's value. func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { s.Prefix = &v @@ -23363,6 +26124,12 @@ type LifecycleRuleFilter struct { // of the predicates configured inside the And operator. And *LifecycleRuleAndOperator `type:"structure"` + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + // Prefix identifying one or more objects to which the rule applies. // // Replacement must be made for object keys containing special characters (such @@ -23374,12 +26141,20 @@ type LifecycleRuleFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleFilter) GoString() string { return s.String() } @@ -23410,6 +26185,18 @@ func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRule return s } +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeGreaterThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeLessThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeLessThan = &v + return s +} + // SetPrefix sets the Prefix field's value. func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { s.Prefix = &v @@ -23435,17 +26222,25 @@ type ListBucketAnalyticsConfigurationsInput struct { ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. 
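The ObjectSizeGreaterThan/ObjectSizeLessThan members introduced here can be combined with a prefix through the And operator. A hedged sketch of building such a rule (bucket name, prefix, and size bounds are invented):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	rule := &s3.LifecycleRule{
		ID:     aws.String("expire-large-logs"), // placeholder
		Status: aws.String("Enabled"),
		// Only objects under logs/ between 128 KiB and 64 MiB match this rule.
		Filter: &s3.LifecycleRuleFilter{
			And: &s3.LifecycleRuleAndOperator{
				Prefix:                aws.String("logs/"),
				ObjectSizeGreaterThan: aws.Int64(128 * 1024),
				ObjectSizeLessThan:    aws.Int64(64 * 1024 * 1024),
			},
		},
		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
	}

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket:                 aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{Rules: []*s3.LifecycleRule{rule}},
	})
	if err != nil {
		fmt.Println("PutBucketLifecycleConfiguration failed:", err)
	}
}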
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsInput) GoString() string { return s.String() } @@ -23539,12 +26334,20 @@ type ListBucketAnalyticsConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { return s.String() } @@ -23587,12 +26390,20 @@ type ListBucketIntelligentTieringConfigurationsInput struct { ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { return s.String() } @@ -23680,12 +26491,20 @@ type ListBucketIntelligentTieringConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
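The ListBucket*Configurations inputs shown here all page through results with ContinuationToken/NextContinuationToken. A hedged sketch of that loop for analytics configurations (bucket name is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	var token *string
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String("example-bucket"), // placeholder
			ContinuationToken: token,
		})
		if err != nil {
			fmt.Println("ListBucketAnalyticsConfigurations failed:", err)
			return
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		// NextContinuationToken is only meaningful while the listing is truncated.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken
	}
}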
func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { return s.String() } @@ -23729,17 +26548,25 @@ type ListBucketInventoryConfigurationsInput struct { ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsInput) GoString() string { return s.String() } @@ -23833,12 +26660,20 @@ type ListBucketInventoryConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsOutput) GoString() string { return s.String() } @@ -23882,17 +26717,25 @@ type ListBucketMetricsConfigurationsInput struct { ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsInput) GoString() string { return s.String() } @@ -23987,12 +26830,20 @@ type ListBucketMetricsConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsOutput) GoString() string { return s.String() } @@ -24025,12 +26876,20 @@ type ListBucketsInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsInput) GoString() string { return s.String() } @@ -24038,19 +26897,27 @@ func (s ListBucketsInput) GoString() string { type ListBucketsOutput struct { _ struct{} `type:"structure"` - // The list of buckets owned by the requestor. + // The list of buckets owned by the requester. Buckets []*Bucket `locationNameList:"Bucket" type:"list"` // The owner of the buckets listed. Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsOutput) GoString() string { return s.String() } @@ -24082,9 +26949,9 @@ type ListMultipartUploadsInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -24109,8 +26976,8 @@ type ListMultipartUploadsInput struct { EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Together with upload-id-marker, this parameter specifies the multipart upload @@ -24143,12 +27010,20 @@ type ListMultipartUploadsInput struct { UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsInput) GoString() string { return s.String() } @@ -24309,12 +27184,20 @@ type ListMultipartUploadsOutput struct { Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsOutput) GoString() string { return s.String() } @@ -24422,8 +27305,8 @@ type ListObjectVersionsInput struct { EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the key to start with when listing objects in a bucket. 
@@ -24448,12 +27331,20 @@ type ListObjectVersionsInput struct { VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsInput) GoString() string { return s.String() } @@ -24620,12 +27511,20 @@ type ListObjectVersionsOutput struct { Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsOutput) GoString() string { return s.String() } @@ -24723,9 +27622,9 @@ type ListObjectsInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -24743,8 +27642,8 @@ type ListObjectsInput struct { EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Marker is where you want Amazon S3 to start listing from. 
Amazon S3 starts @@ -24765,12 +27664,20 @@ type ListObjectsInput struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsInput) GoString() string { return s.String() } @@ -24933,12 +27840,20 @@ type ListObjectsOutput struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsOutput) GoString() string { return s.String() } @@ -25018,9 +27933,9 @@ type ListObjectsV2Input struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -25038,8 +27953,8 @@ type ListObjectsV2Input struct { EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The owner field is not present in listV2 by default, if you want to return @@ -25065,12 +27980,20 @@ type ListObjectsV2Input struct { StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } -// String returns the string representation +// String returns the string representation. 
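A short sketch of consuming ListObjectsV2 as documented in this hunk; the Pages helper follows NextContinuationToken automatically, and FetchOwner is needed because listV2 omits the owner by default (bucket and prefix are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	input := &s3.ListObjectsV2Input{
		Bucket:     aws.String("example-bucket"), // placeholder
		Prefix:     aws.String("tiles/"),         // placeholder
		FetchOwner: aws.Bool(true),
	}

	// ListObjectsV2Pages keeps requesting pages until the listing is exhausted
	// or the callback returns false.
	err := svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Printf("%s\t%d bytes\n", aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true
	})
	if err != nil {
		fmt.Println("ListObjectsV2 failed:", err)
	}
}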
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Input) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Input) GoString() string { return s.String() } @@ -25254,9 +28177,9 @@ type ListObjectsV2Output struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Name *string `type:"string"` @@ -25273,12 +28196,20 @@ type ListObjectsV2Output struct { StartAfter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Output) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Output) GoString() string { return s.String() } @@ -25370,17 +28301,17 @@ type ListPartsInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. 
+ // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -25397,23 +28328,53 @@ type ListPartsInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListPartsInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Upload ID identifying the multipart upload whose parts are being listed. // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
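The SSECustomerAlgorithm, SSECustomerKey and SSECustomerKeyMD5 members added to ListPartsInput above are only needed when the parts were uploaded with a checksum algorithm under SSE-C. Below is a minimal, hedged sketch of how a caller of this vendored SDK might supply them via the setters introduced in this hunk; the bucket, key, upload ID and customer-key values are placeholders, not anything from this repository.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Placeholder values; the SSE-C key is the same customer-provided key the
	// parts were uploaded with, and the MD5 header is its base64-encoded MD5.
	input := (&s3.ListPartsInput{}).
		SetBucket("example-bucket").
		SetKey("example-object").
		SetUploadId("example-upload-id").
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey("example-256-bit-customer-key").
		SetSSECustomerKeyMD5("example-base64-md5-of-key")

	out, err := svc.ListParts(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Parts {
		log.Printf("part %d: %d bytes", *p.PartNumber, *p.Size)
	}
}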
func (s ListPartsInput) GoString() string { return s.String() } @@ -25486,6 +28447,31 @@ func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { return s } +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *ListPartsInput) SetSSECustomerAlgorithm(v string) *ListPartsInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *ListPartsInput) SetSSECustomerKey(v string) *ListPartsInput { + s.SSECustomerKey = &v + return s +} + +func (s *ListPartsInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *ListPartsInput) SetSSECustomerKeyMD5(v string) *ListPartsInput { + s.SSECustomerKeyMD5 = &v + return s +} + // SetUploadId sets the UploadId field's value. func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { s.UploadId = &v @@ -25542,6 +28528,9 @@ type ListPartsOutput struct { // not return the access point ARN or access point alias if used. Bucket *string `type:"string"` + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + // Container element that identifies who initiated the multipart upload. If // the initiator is an Amazon Web Services account, this element provides the // same information as the Owner element. If the initiator is an IAM User, this @@ -25590,12 +28579,20 @@ type ListPartsOutput struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsOutput) GoString() string { return s.String() } @@ -25625,6 +28622,12 @@ func (s *ListPartsOutput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ListPartsOutput) SetChecksumAlgorithm(v string) *ListPartsOutput { + s.ChecksumAlgorithm = &v + return s +} + // SetInitiator sets the Initiator field's value. func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { s.Initiator = v @@ -25725,12 +28728,20 @@ type Location struct { UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Location) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s Location) GoString() string { return s.String() } @@ -25837,6 +28848,11 @@ type LoggingEnabled struct { TargetBucket *string `type:"string" required:"true"` // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see Permissions for server access + // log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` // A prefix for all log object keys. If you store log files from multiple Amazon @@ -25847,12 +28863,20 @@ type LoggingEnabled struct { TargetPrefix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LoggingEnabled) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LoggingEnabled) GoString() string { return s.String() } @@ -25912,12 +28936,20 @@ type MetadataEntry struct { Value *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetadataEntry) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetadataEntry) GoString() string { return s.String() } @@ -25949,12 +28981,20 @@ type Metrics struct { Status *string `type:"string" required:"true" enum:"MetricsStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Metrics) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Metrics) GoString() string { return s.String() } @@ -25990,6 +29030,9 @@ func (s *Metrics) SetStatus(v string) *Metrics { type MetricsAndOperator struct { _ struct{} `type:"structure"` + // The access point ARN used when evaluating an AND predicate. 
+ AccessPointArn *string `type:"string"` + // The prefix used when evaluating an AND predicate. Prefix *string `type:"string"` @@ -25997,12 +29040,20 @@ type MetricsAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetricsAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetricsAndOperator) GoString() string { return s.String() } @@ -26027,6 +29078,12 @@ func (s *MetricsAndOperator) Validate() error { return nil } +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator { + s.AccessPointArn = &v + return s +} + // SetPrefix sets the Prefix field's value. func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { s.Prefix = &v @@ -26043,15 +29100,14 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating // an existing metrics configuration, note that this is a full replacement of // the existing metrics configuration. If you don't include the elements you -// want to keep, they are erased. For more information, see PUT Bucket metrics -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// in the Amazon S3 API Reference. +// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). type MetricsConfiguration struct { _ struct{} `type:"structure"` // Specifies a metrics configuration filter. The metrics configuration will // only include objects that meet the filter's criteria. A filter must be a - // prefix, a tag, or a conjunction (MetricsAndOperator). + // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). Filter *MetricsFilter `type:"structure"` // The ID used to identify the metrics configuration. @@ -26060,12 +29116,20 @@ type MetricsConfiguration struct { Id *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetricsConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
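Because MetricsAndOperator (above) and MetricsFilter (below) both gain an AccessPointArn member, a bucket metrics configuration can now be scoped to requests made through a specific access point. A hedged sketch of what that might look like for a consumer of this vendored SDK; the bucket name, configuration ID and ARN are made-up placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Metrics configuration filtered by an access point ARN (placeholder value).
	cfg := &s3.MetricsConfiguration{
		Id: aws.String("access-point-requests"),
		Filter: &s3.MetricsFilter{
			AccessPointArn: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap"),
		},
	}

	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket:               aws.String("example-bucket"),
		Id:                   aws.String("access-point-requests"),
		MetricsConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}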
func (s MetricsConfiguration) GoString() string { return s.String() } @@ -26102,10 +29166,14 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, -// a tag, or a conjunction (MetricsAndOperator). +// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). +// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). type MetricsFilter struct { _ struct{} `type:"structure"` + // The access point ARN used when evaluating a metrics filter. + AccessPointArn *string `type:"string"` + // A conjunction (logical AND) of predicates, which is used in evaluating a // metrics filter. The operator must have at least two predicates, and an object // must match all of the predicates in order for the filter to apply. @@ -26118,12 +29186,20 @@ type MetricsFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetricsFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MetricsFilter) GoString() string { return s.String() } @@ -26148,6 +29224,12 @@ func (s *MetricsFilter) Validate() error { return nil } +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter { + s.AccessPointArn = &v + return s +} + // SetAnd sets the And field's value. func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { s.And = v @@ -26170,6 +29252,9 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { type MultipartUpload struct { _ struct{} `type:"structure"` + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + // Date and time at which the multipart upload was initiated. Initiated *time.Time `type:"timestamp"` @@ -26189,16 +29274,30 @@ type MultipartUpload struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MultipartUpload) GoString() string { return s.String() } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
+func (s *MultipartUpload) SetChecksumAlgorithm(v string) *MultipartUpload { + s.ChecksumAlgorithm = &v + return s +} + // SetInitiated sets the Initiated field's value. func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { s.Initiated = &v @@ -26243,24 +29342,45 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` + // Specifies how many noncurrent versions Amazon S3 will retain. If there are + // this many more recent noncurrent versions, Amazon S3 will take the associated + // action. For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon S3 User Guide. NoncurrentDays *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionExpiration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionExpiration) GoString() string { return s.String() } +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionExpiration) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionExpiration { + s.NewerNoncurrentVersions = &v + return s +} + // SetNoncurrentDays sets the NoncurrentDays field's value. func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { s.NoncurrentDays = &v @@ -26268,15 +29388,22 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, -// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning -// is suspended), you can set this action to request that Amazon S3 transition -// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, -// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's -// lifetime. +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. 
If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon +// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at +// a specific period in the object's lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` + // Specifies how many noncurrent versions Amazon S3 will retain. If there are + // this many more recent noncurrent versions, Amazon S3 will take the associated + // action. For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent @@ -26288,16 +29415,30 @@ type NoncurrentVersionTransition struct { StorageClass *string `type:"string" enum:"TransitionStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionTransition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionTransition) GoString() string { return s.String() } +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionTransition) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionTransition { + s.NewerNoncurrentVersions = &v + return s +} + // SetNoncurrentDays sets the NoncurrentDays field's value. func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { s.NoncurrentDays = &v @@ -26315,6 +29456,9 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration `type:"structure"` + // Describes the Lambda functions to invoke and the events for which to invoke // them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` @@ -26328,12 +29472,20 @@ type NotificationConfiguration struct { TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfiguration) GoString() string { return s.String() } @@ -26378,6 +29530,12 @@ func (s *NotificationConfiguration) Validate() error { return nil } +// SetEventBridgeConfiguration sets the EventBridgeConfiguration field's value. +func (s *NotificationConfiguration) SetEventBridgeConfiguration(v *EventBridgeConfiguration) *NotificationConfiguration { + s.EventBridgeConfiguration = v + return s +} + // SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { s.LambdaFunctionConfigurations = v @@ -26413,12 +29571,20 @@ type NotificationConfigurationDeprecated struct { TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationDeprecated) GoString() string { return s.String() } @@ -26451,12 +29617,20 @@ type NotificationConfigurationFilter struct { Key *KeyFilter `locationName:"S3Key" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationFilter) GoString() string { return s.String() } @@ -26471,6 +29645,9 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf type Object struct { _ struct{} `type:"structure"` + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + // The entity tag is a hash of the object. The ETag reflects changes only to // the contents of an object, not its metadata. The ETag may or may not be an // MD5 digest of the object data. Whether or not it is depends on how the object @@ -26488,7 +29665,9 @@ type Object struct { // // * If an object is created by either the Multipart Upload or Part Copy // operation, the ETag is not an MD5 digest, regardless of the method of - // encryption. + // encryption. 
If an object is larger than 16 MB, the Amazon Web Services + // Management Console will upload or copy that object as a Multipart Upload, + // and therefore the ETag will not be an MD5 digest. ETag *string `type:"string"` // The name that you assign to an object. You use the object key to retrieve @@ -26508,16 +29687,30 @@ type Object struct { StorageClass *string `type:"string" enum:"ObjectStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Object) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Object) GoString() string { return s.String() } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *Object) SetChecksumAlgorithm(v []*string) *Object { + s.ChecksumAlgorithm = v + return s +} + // SetETag sets the ETag field's value. func (s *Object) SetETag(v string) *Object { s.ETag = &v @@ -26571,12 +29764,20 @@ type ObjectIdentifier struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectIdentifier) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectIdentifier) GoString() string { return s.String() } @@ -26624,12 +29825,20 @@ type ObjectLockConfiguration struct { Rule *ObjectLockRule `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockConfiguration) GoString() string { return s.String() } @@ -26646,20 +29855,28 @@ func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfigur return s } -// A Legal Hold configuration for an object. +// A legal hold configuration for an object. type ObjectLockLegalHold struct { _ struct{} `type:"structure"` - // Indicates whether the specified object has a Legal Hold in place. + // Indicates whether the specified object has a legal hold in place. 
Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockLegalHold) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockLegalHold) GoString() string { return s.String() } @@ -26681,12 +29898,20 @@ type ObjectLockRetention struct { RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRetention) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRetention) GoString() string { return s.String() } @@ -26714,12 +29939,20 @@ type ObjectLockRule struct { DefaultRetention *DefaultRetention `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRule) GoString() string { return s.String() } @@ -26730,10 +29963,110 @@ func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRul return s } +// A container for elements related to an individual part. +type ObjectPart struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` + + // The size of the uploaded part in bytes. + Size *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *ObjectPart) SetChecksumCRC32(v string) *ObjectPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *ObjectPart) SetChecksumCRC32C(v string) *ObjectPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *ObjectPart) SetChecksumSHA1(v string) *ObjectPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *ObjectPart) SetChecksumSHA256(v string) *ObjectPart { + s.ChecksumSHA256 = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *ObjectPart) SetPartNumber(v int64) *ObjectPart { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectPart) SetSize(v int64) *ObjectPart { + s.Size = &v + return s +} + // The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + // The entity tag is an MD5 hash of that version of the object. 
ETag *string `type:"string"` @@ -26760,16 +30093,30 @@ type ObjectVersion struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectVersion) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectVersion) GoString() string { return s.String() } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ObjectVersion) SetChecksumAlgorithm(v []*string) *ObjectVersion { + s.ChecksumAlgorithm = v + return s +} + // SetETag sets the ETag field's value. func (s *ObjectVersion) SetETag(v string) *ObjectVersion { s.ETag = &v @@ -26826,12 +30173,20 @@ type OutputLocation struct { S3 *Location `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputLocation) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputLocation) GoString() string { return s.String() } @@ -26868,12 +30223,20 @@ type OutputSerialization struct { JSON *JSONOutput `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputSerialization) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputSerialization) GoString() string { return s.String() } @@ -26901,12 +30264,20 @@ type Owner struct { ID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Owner) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
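Both Object and ObjectVersion above now carry a flattened ChecksumAlgorithm list. A small, hedged sketch of reading it from a ListObjectVersions call against this vendored SDK; the bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, v := range out.Versions {
		// ChecksumAlgorithm is only populated for objects uploaded with a checksum.
		algos := aws.StringValueSlice(v.ChecksumAlgorithm)
		log.Printf("%s (version %s): checksum algorithms %v",
			aws.StringValue(v.Key), aws.StringValue(v.VersionId), algos)
	}
}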
func (s Owner) GoString() string { return s.String() } @@ -26933,12 +30304,20 @@ type OwnershipControls struct { Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControls) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControls) GoString() string { return s.String() } @@ -26985,16 +30364,30 @@ type OwnershipControlsRule struct { // ObjectWriter - The uploading account will own the object if the object is // uploaded with the bucket-owner-full-control canned ACL. // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control + // canned ACL or an equivalent form of this ACL expressed in the XML format. + // // ObjectOwnership is a required field ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControlsRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControlsRule) GoString() string { return s.String() } @@ -27023,12 +30416,20 @@ type ParquetInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ParquetInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ParquetInput) GoString() string { return s.String() } @@ -27037,6 +30438,36 @@ func (s ParquetInput) GoString() string { type Part struct { _ struct{} `type:"structure"` + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. 
This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + // Entity tag returned when the part was uploaded. ETag *string `type:"string"` @@ -27051,16 +30482,48 @@ type Part struct { Size *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Part) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Part) GoString() string { return s.String() } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Part) SetChecksumCRC32(v string) *Part { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Part) SetChecksumCRC32C(v string) *Part { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Part) SetChecksumSHA1(v string) *Part { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Part) SetChecksumSHA256(v string) *Part { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *Part) SetETag(v string) *Part { s.ETag = &v @@ -27094,12 +30557,20 @@ type PolicyStatus struct { IsPublic *bool `locationName:"IsPublic" type:"boolean"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyStatus) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyStatus) GoString() string { return s.String() } @@ -27124,12 +30595,20 @@ type Progress struct { BytesScanned *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Progress) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Progress) GoString() string { return s.String() } @@ -27160,12 +30639,20 @@ type ProgressEvent struct { Details *Progress `locationName:"Details" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ProgressEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ProgressEvent) GoString() string { return s.String() } @@ -27217,7 +30704,7 @@ type PublicAccessBlockConfiguration struct { // for this bucket and objects in this bucket. Setting this element to TRUE // causes the following behavior: // - // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is // public. // // * PUT Object calls fail if the request includes a public ACL. @@ -27253,12 +30740,20 @@ type PublicAccessBlockConfiguration struct { RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PublicAccessBlockConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
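The PublicAccessBlockConfiguration documented above is applied with PutPublicAccessBlock. A minimal, hedged sketch of the call as a consumer of this vendored SDK might make it; the bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}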
func (s PublicAccessBlockConfiguration) GoString() string { return s.String() } @@ -27300,18 +30795,43 @@ type PutBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationInput) GoString() string { return s.String() } @@ -27354,6 +30874,12 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAccelerateConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketAccelerateConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { s.ExpectedBucketOwner = &v @@ -27391,12 +30917,20 @@ type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
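PutBucketAccelerateConfigurationInput gains the x-amz-sdk-checksum-algorithm header above, but as the new comment notes, the AWS SDK for Go v1 does not compute request-payload checksums itself, so the simplest call leaves it unset. A hedged sketch; the bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
		// ChecksumAlgorithm is deliberately left unset; per the comment above,
		// the SDK v1 does not populate the matching checksum member for you.
	})
	if err != nil {
		log.Fatal(err)
	}
}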
func (s PutBucketAccelerateConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } @@ -27415,9 +30949,30 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -27440,12 +30995,20 @@ type PutBucketAclInput struct { GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclInput) GoString() string { return s.String() } @@ -27496,6 +31059,12 @@ func (s *PutBucketAclInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
+func (s *PutBucketAclInput) SetChecksumAlgorithm(v string) *PutBucketAclInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { s.ExpectedBucketOwner = &v @@ -27563,12 +31132,20 @@ type PutBucketAclOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclOutput) GoString() string { return s.String() } @@ -27587,8 +31164,8 @@ type PutBucketAnalyticsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -27597,12 +31174,20 @@ type PutBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -27696,12 +31281,20 @@ type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
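For the PutBucketAclInput documented above, the simplest use is a canned ACL rather than the individual grant headers. A minimal sketch, assuming the same placeholder client:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// makeBucketPrivate applies the "private" canned ACL. The Content-MD5 header
// mentioned in the doc comment above is computed by the SDK itself, so no
// checksum fields need to be set on the input.
func makeBucketPrivate(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	return err
}
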
func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -27722,18 +31315,47 @@ type PutBucketCorsInput struct { // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsInput) GoString() string { return s.String() } @@ -27781,6 +31403,12 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketCorsInput) SetChecksumAlgorithm(v string) *PutBucketCorsInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { s.ExpectedBucketOwner = &v @@ -27818,12 +31446,20 @@ type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsOutput) GoString() string { return s.String() } @@ -27832,17 +31468,38 @@ type PutBucketEncryptionInput struct { _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer master keys stored in Amazon - // Web Services KMS (SSE-KMS). For information about the Amazon S3 default encryption - // feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the default server-side-encryption configuration. 
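The SSE-S3 default-encryption configuration described above could be applied roughly as follows; this is an illustrative sketch, and the helper name and bucket are placeholders.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// defaultToSSES3 sets SSE-S3 (AES-256 with Amazon S3 managed keys) as the
// bucket's default encryption.
func defaultToSSES3(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
						SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
					},
				},
			},
		},
	})
	return err
}
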
@@ -27851,12 +31508,20 @@ type PutBucketEncryptionInput struct { ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionInput) GoString() string { return s.String() } @@ -27898,6 +31563,12 @@ func (s *PutBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketEncryptionInput) SetChecksumAlgorithm(v string) *PutBucketEncryptionInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { s.ExpectedBucketOwner = &v @@ -27941,12 +31612,20 @@ type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionOutput) GoString() string { return s.String() } @@ -27971,12 +31650,20 @@ type PutBucketIntelligentTieringConfigurationInput struct { IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -28064,12 +31751,20 @@ type PutBucketIntelligentTieringConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -28083,8 +31778,8 @@ type PutBucketInventoryConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -28098,12 +31793,20 @@ type PutBucketInventoryConfigurationInput struct { InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -28197,12 +31900,20 @@ type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -28215,21 +31926,50 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } @@ -28268,6 +32008,12 @@ func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
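As a concrete illustration of the lifecycle configuration above (up to 1,000 rules per bucket), here is a hedged sketch adding a single expiration rule; the rule ID and prefix are invented for the example.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// expireTempObjects adds one lifecycle rule that expires objects under the
// "tmp/" prefix after 30 days.
func expireTempObjects(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{
				{
					ID:         aws.String("expire-tmp"),
					Status:     aws.String(s3.ExpirationStatusEnabled),
					Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
					Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
				},
			},
		},
	})
	return err
}
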
func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { s.ExpectedBucketOwner = &v @@ -28311,12 +32057,20 @@ type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } @@ -28327,21 +32081,50 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1000 rules. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleInput) GoString() string { return s.String() } @@ -28380,6 +32163,12 @@ func (s *PutBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { s.ExpectedBucketOwner = &v @@ -28423,12 +32212,20 @@ type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleOutput) GoString() string { return s.String() } @@ -28446,18 +32243,47 @@ type PutBucketLoggingInput struct { // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. 
+ // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingInput) GoString() string { return s.String() } @@ -28505,6 +32331,12 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLoggingInput) SetChecksumAlgorithm(v string) *PutBucketLoggingInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { s.ExpectedBucketOwner = &v @@ -28542,12 +32374,20 @@ type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingOutput) GoString() string { return s.String() } @@ -28561,8 +32401,8 @@ type PutBucketMetricsConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the metrics configuration. @@ -28576,12 +32416,20 @@ type PutBucketMetricsConfigurationInput struct { MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
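The BucketLoggingStatus payload referenced above takes a target bucket and prefix; a small, assumed helper might look like this (both bucket names are placeholders):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableAccessLogging writes server access logs for bucket into logBucket
// under the given key prefix.
func enableAccessLogging(svc *s3.S3, bucket, logBucket, prefix string) error {
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String(bucket),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String(logBucket),
				TargetPrefix: aws.String(prefix),
			},
		},
	})
	return err
}
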
func (s PutBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -28675,12 +32523,20 @@ type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -28694,8 +32550,8 @@ type PutBucketNotificationConfigurationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for specifying the notification configuration of the bucket. @@ -28703,14 +32559,26 @@ type PutBucketNotificationConfigurationInput struct { // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True + // or false value. + SkipDestinationValidation *bool `location:"header" locationName:"x-amz-skip-destination-validation" type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationInput) GoString() string { return s.String() } @@ -28764,6 +32632,12 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } +// SetSkipDestinationValidation sets the SkipDestinationValidation field's value. 
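The new SkipDestinationValidation field shown above can be combined with an ordinary notification configuration. The following sketch assumes a pre-existing SQS queue and uses a placeholder ARN; it is not part of the diff.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// notifyQueueOnCreate routes ObjectCreated events to an SQS queue. Setting
// SkipDestinationValidation tells S3 not to verify the SQS destination before
// accepting the configuration, per the field description above.
func notifyQueueOnCreate(svc *s3.S3, bucket, queueARN string) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{
				{
					QueueArn: aws.String(queueARN),
					Events:   []*string{aws.String("s3:ObjectCreated:*")},
				},
			},
		},
		SkipDestinationValidation: aws.Bool(true),
	})
	return err
}
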
+func (s *PutBucketNotificationConfigurationInput) SetSkipDestinationValidation(v bool) *PutBucketNotificationConfigurationInput { + s.SkipDestinationValidation = &v + return s +} + func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -28795,12 +32669,20 @@ type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } @@ -28813,9 +32695,30 @@ type PutBucketNotificationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The container for the configuration. @@ -28824,12 +32727,20 @@ type PutBucketNotificationInput struct { NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationInput) GoString() string { return s.String() } @@ -28866,6 +32777,12 @@ func (s *PutBucketNotificationInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketNotificationInput) SetChecksumAlgorithm(v string) *PutBucketNotificationInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { s.ExpectedBucketOwner = &v @@ -28909,12 +32826,20 @@ type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationOutput) GoString() string { return s.String() } @@ -28928,23 +32853,31 @@ type PutBucketOwnershipControlsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want - // to apply to this Amazon S3 bucket. + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // that you want to apply to this Amazon S3 bucket. // // OwnershipControls is a required field OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
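For the OwnershipControls values listed above (BucketOwnerEnforced, BucketOwnerPreferred, ObjectWriter), a minimal illustrative call, not part of this diff, might be:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enforceBucketOwnerOwnership applies the BucketOwnerEnforced setting, under
// which ACLs are disabled and the bucket owner owns every object.
func enforceBucketOwnerOwnership(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{
				{ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced)},
			},
		},
	})
	return err
}
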
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -29029,12 +32962,20 @@ type PutBucketOwnershipControlsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -29047,13 +32988,34 @@ type PutBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // Set this parameter to true to confirm that you want to remove your permissions // to change this bucket policy in the future. ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The bucket policy as a JSON document. @@ -29062,12 +33024,20 @@ type PutBucketPolicyInput struct { Policy *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. 
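Since the Policy member documented above is a raw JSON document, a sketch of PutBucketPolicy with an example deny-insecure-transport policy may help; the policy text is purely illustrative.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// denyInsecureTransport attaches a bucket policy that rejects requests made
// without TLS.
func denyInsecureTransport(svc *s3.S3, bucket string) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::` + bucket + `", "arn:aws:s3:::` + bucket + `/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String(bucket),
		Policy: aws.String(policy),
	})
	return err
}
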
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyInput) GoString() string { return s.String() } @@ -29104,6 +33074,12 @@ func (s *PutBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketPolicyInput) SetChecksumAlgorithm(v string) *PutBucketPolicyInput { + s.ChecksumAlgorithm = &v + return s +} + // SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { s.ConfirmRemoveSelfBucketAccess = &v @@ -29153,12 +33129,20 @@ type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyOutput) GoString() string { return s.String() } @@ -29171,9 +33155,30 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for replication rules. You can add up to 1,000 rules. The maximum @@ -29186,12 +33191,20 @@ type PutBucketReplicationInput struct { Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationInput) GoString() string { return s.String() } @@ -29233,6 +33246,12 @@ func (s *PutBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketReplicationInput) SetChecksumAlgorithm(v string) *PutBucketReplicationInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { s.ExpectedBucketOwner = &v @@ -29282,12 +33301,20 @@ type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationOutput) GoString() string { return s.String() } @@ -29300,9 +33327,30 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. 
+ // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for Payer. @@ -29311,12 +33359,20 @@ type PutBucketRequestPaymentInput struct { RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentInput) GoString() string { return s.String() } @@ -29358,6 +33414,12 @@ func (s *PutBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketRequestPaymentInput) SetChecksumAlgorithm(v string) *PutBucketRequestPaymentInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { s.ExpectedBucketOwner = &v @@ -29401,12 +33463,20 @@ type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
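The RequestPaymentConfiguration above carries a single Payer value; enabling requester-pays, under the same placeholder assumptions, looks like:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableRequesterPays sets Payer to "Requester", so that downloaders rather
// than the bucket owner pay for requests and data transfer.
func enableRequesterPays(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester),
		},
	})
	return err
}
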
func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } @@ -29419,9 +33489,30 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the TagSet and Tag elements. @@ -29430,12 +33521,20 @@ type PutBucketTaggingInput struct { Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingInput) GoString() string { return s.String() } @@ -29477,6 +33576,12 @@ func (s *PutBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketTaggingInput) SetChecksumAlgorithm(v string) *PutBucketTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { s.ExpectedBucketOwner = &v @@ -29520,12 +33625,20 @@ type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingOutput) GoString() string { return s.String() } @@ -29538,9 +33651,30 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, @@ -29553,12 +33687,20 @@ type PutBucketVersioningInput struct { VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketVersioningInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningInput) GoString() string { return s.String() } @@ -29595,6 +33737,12 @@ func (s *PutBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketVersioningInput) SetChecksumAlgorithm(v string) *PutBucketVersioningInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { s.ExpectedBucketOwner = &v @@ -29644,12 +33792,20 @@ type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningOutput) GoString() string { return s.String() } @@ -29662,9 +33818,30 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. 
+ // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the request. @@ -29673,12 +33850,20 @@ type PutBucketWebsiteInput struct { WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteInput) GoString() string { return s.String() } @@ -29720,6 +33905,12 @@ func (s *PutBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketWebsiteInput) SetChecksumAlgorithm(v string) *PutBucketWebsiteInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { s.ExpectedBucketOwner = &v @@ -29763,12 +33954,20 @@ type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteOutput) GoString() string { return s.String() } @@ -29796,9 +33995,30 @@ type PutObjectAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. 
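The x-amz-expected-bucket-owner guard described in these doc comments applies to all of the Put* calls in this hunk. Below is a minimal sketch of how a caller might use it with the v1 SDK; the bucket name and account ID are placeholders, and the 403 check goes through the generic awserr.RequestFailure interface rather than any operation-specific error type.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Guard the call with the expected owner; a mismatch is rejected with 403 Forbidden.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
		},
	})
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
		log.Fatalf("bucket is not owned by the expected account: %v", reqErr)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("website configuration applied")
}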
If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -29840,9 +34060,9 @@ type PutObjectAclInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Key is a required field @@ -29850,8 +34070,8 @@ type PutObjectAclInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -29859,12 +34079,20 @@ type PutObjectAclInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutObjectAclInput) GoString() string { return s.String() } @@ -29921,6 +34149,12 @@ func (s *PutObjectAclInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectAclInput) SetChecksumAlgorithm(v string) *PutObjectAclInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { s.ExpectedBucketOwner = &v @@ -30010,12 +34244,20 @@ type PutObjectAclOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclOutput) GoString() string { return s.String() } @@ -30050,9 +34292,9 @@ type PutObjectInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -30072,6 +34314,51 @@ type PutObjectInput struct { // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. 
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Specifies presentational information for the object. For more information, // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -30103,8 +34390,8 @@ type PutObjectInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, @@ -30152,8 +34439,8 @@ type PutObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
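Because the v1 SDK does not compute these request-payload checksums itself, a caller supplies ChecksumAlgorithm together with the matching Checksum* member. A minimal sketch under that assumption, using the pinned v1.44.x release that exposes the checksum fields; the bucket and key are placeholders and the client setup is the usual session pattern.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	payload := []byte("hello, world")

	// v1 does not compute the payload checksum for you: compute the SHA-256
	// digest locally and send it base64-encoded alongside the algorithm name.
	sum := sha256.Sum256(payload)
	checksum := base64.StdEncoding.EncodeToString(sum[:])

	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"), // placeholder
		Key:               aws.String("example-key"),    // placeholder
		Body:              bytes.NewReader(payload),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
		ChecksumSHA256:    aws.String(checksum),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The verified checksum is echoed back on the response.
	fmt.Println("stored with checksum:", aws.StringValue(out.ChecksumSHA256))
}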
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -30166,6 +34453,10 @@ type PutObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -30176,16 +34467,24 @@ type PutObjectInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed customer master key - // (CMK) that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, - // but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 - // uses the Amazon Web Services managed CMK in Amazon Web Services to protect - // the data. If the KMS key does not exist in the same account issuing the command, - // you must use the full ARN and not just the ID. + // (Amazon Web Services KMS) symmetrical customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms, but + // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses + // the Amazon Web Services managed key to protect the data. If the KMS key does + // not exist in the same account issuing the command, you must use the full + // ARN and not just the ID. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -30225,12 +34524,20 @@ type PutObjectInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. 
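The sensitive:"true" decoration called out above is what drives the String and GoString redaction. A small illustration follows; the KMS key ARN is a dummy placeholder, never real key material.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.PutObjectInput{
		Bucket:      aws.String("example-bucket"),           // placeholder
		Key:         aws.String("example-key"),              // placeholder
		SSEKMSKeyId: aws.String("arn:aws:kms:...:key/dummy"), // placeholder, tagged sensitive:"true"
	}

	// Per the doc comments above, the member name is printed but the value
	// is replaced with the literal string "sensitive".
	fmt.Println(input.String())
}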
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectInput) GoString() string { return s.String() } @@ -30294,6 +34601,36 @@ func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectInput) SetChecksumAlgorithm(v string) *PutObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectInput) SetChecksumCRC32(v string) *PutObjectInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectInput) SetChecksumCRC32C(v string) *PutObjectInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectInput) SetChecksumSHA1(v string) *PutObjectInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectInput) SetChecksumSHA256(v string) *PutObjectInput { + s.ChecksumSHA256 = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { s.ContentDisposition = &v @@ -30493,7 +34830,7 @@ func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { type PutObjectLegalHoldInput struct { _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - // The bucket name containing the object that you want to place a Legal Hold + // The bucket name containing the object that you want to place a legal hold // on. // // When using this action with an access point, you must direct requests to @@ -30506,37 +34843,66 @@ type PutObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. 
+ // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The key name for the object that you want to place a Legal Hold on. + // The key name for the object that you want to place a legal hold on. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Container element for the Legal Hold configuration you want to apply to the + // Container element for the legal hold configuration you want to apply to the // specified object. LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // The version ID of the object that you want to place a Legal Hold on. + // The version ID of the object that you want to place a legal hold on. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldInput) GoString() string { return s.String() } @@ -30576,6 +34942,12 @@ func (s *PutObjectLegalHoldInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectLegalHoldInput) SetChecksumAlgorithm(v string) *PutObjectLegalHoldInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
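For context, a legal hold request built from these fields might look like the sketch below. It assumes a bucket that was created with Object Lock enabled, which is required before a legal hold can be applied; the names are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Place a legal hold on an object in an Object Lock enabled bucket.
	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket: aws.String("example-lock-bucket"), // placeholder
		Key:    aws.String("reports/2021.csv"),    // placeholder
		LegalHold: &s3.ObjectLockLegalHold{
			Status: aws.String(s3.ObjectLockLegalHoldStatusOn),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}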
func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { s.ExpectedBucketOwner = &v @@ -30641,12 +35013,20 @@ type PutObjectLegalHoldOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldOutput) GoString() string { return s.String() } @@ -30665,9 +35045,30 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The Object Lock configuration that you want to apply to the specified bucket. @@ -30675,8 +35076,8 @@ type PutObjectLockConfigurationInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
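A default-retention rule for the same Object Lock machinery could be applied along these lines; the 30-day GOVERNANCE rule and the bucket name are arbitrary example values.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Apply a 30-day governance-mode default retention to the bucket.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-lock-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}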
For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -30684,12 +35085,20 @@ type PutObjectLockConfigurationInput struct { Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationInput) GoString() string { return s.String() } @@ -30723,6 +35132,12 @@ func (s *PutObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectLockConfigurationInput) SetChecksumAlgorithm(v string) *PutObjectLockConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { s.ExpectedBucketOwner = &v @@ -30782,12 +35197,20 @@ type PutObjectLockConfigurationOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationOutput) GoString() string { return s.String() } @@ -30805,6 +35228,38 @@ type PutObjectOutput struct { // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` @@ -30812,7 +35267,7 @@ type PutObjectOutput struct { // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), // the response includes this header. It includes the expiry-date and rule-id // key-value pairs that provide information about object expiration. The value - // of the rule-id is URL encoded. + // of the rule-id is URL-encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -30832,30 +35287,46 @@ type PutObjectOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. 
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If you specified server-side encryption either with an Amazon Web Services - // KMS customer master key (CMK) or Amazon S3-managed encryption key in your - // PUT request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. + // KMS key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectOutput) GoString() string { return s.String() } @@ -30866,6 +35337,30 @@ func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectOutput) SetChecksumCRC32(v string) *PutObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectOutput) SetChecksumCRC32C(v string) *PutObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectOutput) SetChecksumSHA1(v string) *PutObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectOutput) SetChecksumSHA256(v string) *PutObjectOutput { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { s.ETag = &v @@ -30939,9 +35434,30 @@ type PutObjectRetentionInput struct { // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. 
This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object that you want to apply this Object Retention @@ -30952,8 +35468,8 @@ type PutObjectRetentionInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -30965,12 +35481,20 @@ type PutObjectRetentionInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionInput) GoString() string { return s.String() } @@ -31016,6 +35540,12 @@ func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjec return s } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
+func (s *PutObjectRetentionInput) SetChecksumAlgorithm(v string) *PutObjectRetentionInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { s.ExpectedBucketOwner = &v @@ -31081,12 +35611,20 @@ type PutObjectRetentionOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionOutput) GoString() string { return s.String() } @@ -31112,17 +35650,38 @@ type PutObjectTaggingInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. 
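A tagging call that exercises the updated PutObjectTaggingInput might look like this sketch; the bucket, key and tag pair are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("mbtiles")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries the version ID the tag set was applied to (empty
	// if the bucket is unversioned).
	fmt.Println("tagged version:", aws.StringValue(out.VersionId))
}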
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Name of the object key. @@ -31132,8 +35691,8 @@ type PutObjectTaggingInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -31146,12 +35705,20 @@ type PutObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingInput) GoString() string { return s.String() } @@ -31199,6 +35766,12 @@ func (s *PutObjectTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectTaggingInput) SetChecksumAlgorithm(v string) *PutObjectTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { s.ExpectedBucketOwner = &v @@ -31263,12 +35836,20 @@ type PutObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutObjectTaggingOutput) GoString() string { return s.String() } @@ -31288,9 +35869,30 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The PublicAccessBlock configuration that you want to apply to this Amazon @@ -31303,12 +35905,20 @@ type PutPublicAccessBlockInput struct { PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockInput) GoString() string { return s.String() } @@ -31345,6 +35955,12 @@ func (s *PutPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutPublicAccessBlockInput) SetChecksumAlgorithm(v string) *PutPublicAccessBlockInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { s.ExpectedBucketOwner = &v @@ -31388,12 +36004,20 @@ type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } @@ -31406,7 +36030,7 @@ type QueueConfiguration struct { // A collection of bucket events for which to send notifications // // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) @@ -31424,12 +36048,20 @@ type QueueConfiguration struct { QueueArn *string `locationName:"Queue" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfiguration) GoString() string { return s.String() } @@ -31487,7 +36119,7 @@ type QueueConfigurationDeprecated struct { Event *string `deprecated:"true" type:"string" enum:"Event"` // A collection of bucket events for which to send notifications. - Events []*string `locationName:"Event" type:"list" flattened:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. @@ -31498,12 +36130,20 @@ type QueueConfigurationDeprecated struct { Queue *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfigurationDeprecated) GoString() string { return s.String() } @@ -31537,17 +36177,24 @@ type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` // The byte array of partial, one or more result records. - // // Payload is automatically base64 encoded/decoded by the SDK. Payload []byte `type:"blob"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RecordsEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RecordsEvent) GoString() string { return s.String() } @@ -31619,12 +36266,20 @@ type Redirect struct { ReplaceKeyWith *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Redirect) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Redirect) GoString() string { return s.String() } @@ -31674,12 +36329,20 @@ type RedirectAllRequestsTo struct { Protocol *string `type:"string" enum:"Protocol"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RedirectAllRequestsTo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RedirectAllRequestsTo) GoString() string { return s.String() } @@ -31727,12 +36390,20 @@ type ReplicaModifications struct { Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ReplicaModifications) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicaModifications) GoString() string { return s.String() } @@ -31776,12 +36447,20 @@ type ReplicationConfiguration struct { Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationConfiguration) GoString() string { return s.String() } @@ -31886,8 +36565,8 @@ type ReplicationRule struct { // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using - // a customer master key (CMK) stored in Amazon Web Services Key Management - // Service (SSE-KMS). + // a customer managed key stored in Amazon Web Services Key Management Service + // (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` // Specifies whether the rule is enabled. @@ -31896,12 +36575,20 @@ type ReplicationRule struct { Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRule) GoString() string { return s.String() } @@ -32018,12 +36705,20 @@ type ReplicationRuleAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleAndOperator) GoString() string { return s.String() } @@ -32090,12 +36785,20 @@ type ReplicationRuleFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleFilter) GoString() string { return s.String() } @@ -32157,12 +36860,20 @@ type ReplicationTime struct { Time *ReplicationTimeValue `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTime) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTime) GoString() string { return s.String() } @@ -32206,12 +36917,20 @@ type ReplicationTimeValue struct { Minutes *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTimeValue) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTimeValue) GoString() string { return s.String() } @@ -32232,12 +36951,20 @@ type RequestPaymentConfiguration struct { Payer *string `type:"string" required:"true" enum:"Payer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestPaymentConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestPaymentConfiguration) GoString() string { return s.String() } @@ -32270,12 +36997,20 @@ type RequestProgress struct { Enabled *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestProgress) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestProgress) GoString() string { return s.String() } @@ -32301,17 +37036,34 @@ type RestoreObjectInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the action was initiated. 
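The hunks in this region add a ChecksumAlgorithm member and a SetChecksumAlgorithm setter to many of the request shapes (PutObjectTagging, PutPublicAccessBlock, RestoreObject, and so on). As a rough, hypothetical sketch that is not part of the vendored code, calling code built against aws-sdk-go v1 might exercise the new setter on RestoreObjectInput roughly as follows; the bucket, key, owner account ID and restore tier are placeholders, and, per the documentation above, the v1 SDK does not compute the requested checksum of the request payload itself, so a matching payload checksum would still have to be supplied for such a request to succeed.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Region and credentials come from the environment; all values below are placeholders.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	input := &s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket name
		Key:    aws.String("example-key"),    // placeholder object key
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(1),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	}

	// New in this SDK version: request a checksum algorithm and pin the expected bucket owner.
	// As documented above, the SDK for Go v1 does not compute the SHA-256 of the request
	// payload automatically; without a matching checksum the service rejects the request
	// with HTTP 400 Bad Request.
	input.SetChecksumAlgorithm(s3.ChecksumAlgorithmSha256)
	input.SetExpectedBucketOwner("111122223333") // placeholder account ID

	if _, err := svc.RestoreObject(input); err != nil {
		log.Fatal(err)
	}
}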
@@ -32321,8 +37073,8 @@ type RestoreObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -32333,12 +37085,20 @@ type RestoreObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectInput) GoString() string { return s.String() } @@ -32383,6 +37143,12 @@ func (s *RestoreObjectInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *RestoreObjectInput) SetChecksumAlgorithm(v string) *RestoreObjectInput { + s.ChecksumAlgorithm = &v + return s +} + // SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { s.ExpectedBucketOwner = &v @@ -32452,12 +37218,20 @@ type RestoreObjectOutput struct { RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectOutput) GoString() string { return s.String() } @@ -32505,12 +37279,20 @@ type RestoreRequest struct { Type *string `type:"string" enum:"RestoreRequestType"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s RestoreRequest) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreRequest) GoString() string { return s.String() } @@ -32603,12 +37385,20 @@ type RoutingRule struct { Redirect *Redirect `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoutingRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoutingRule) GoString() string { return s.String() } @@ -32666,12 +37456,12 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, - // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning - // is suspended), you can set this action to request that Amazon S3 transition - // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's - // lifetime. + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, + // GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled + // (or versioning is suspended), you can set this action to request that Amazon + // S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, + // INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at + // a specific period in the object's lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` // Object key prefix that identifies one or more objects to which this rule @@ -32697,12 +37487,20 @@ type Rule struct { Transition *Transition `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Rule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Rule) GoString() string { return s.String() } @@ -32776,19 +37574,31 @@ type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` // Specifies the ID of the Amazon Web Services Key Management Service (Amazon - // Web Services KMS) symmetric customer managed customer master key (CMK) to - // use for encrypting inventory reports. + // Web Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. + // + // KeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SSEKMS's + // String and GoString methods. // // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSEKMS) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSEKMS) GoString() string { return s.String() } @@ -32817,12 +37627,20 @@ type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSES3) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSES3) GoString() string { return s.String() } @@ -32843,17 +37661,25 @@ type ScanRange struct { // Specifies the start of the byte range. This parameter is optional. Valid // values: non-negative integers. The default value is 0. If only start is supplied, - // it means scan from that point to the end of the file.For example; 50 + // it means scan from that point to the end of the file. For example, 50 // means scan from byte 50 until the end of the file. Start *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ScanRange) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ScanRange) GoString() string { return s.String() } @@ -33054,8 +37880,8 @@ type SelectObjectContentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The expression that is used to query the object. @@ -33086,16 +37912,26 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, see Server-Side - // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SelectObjectContentInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // The SSE Customer Key MD5. For more information, see Server-Side Encryption - // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the byte range of the object to get the records from. A record @@ -33117,12 +37953,20 @@ type SelectObjectContentInput struct { ScanRange *ScanRange `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentInput) GoString() string { return s.String() } @@ -33280,12 +38124,20 @@ type SelectObjectContentOutput struct { EventStream *SelectObjectContentEventStream } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentOutput) GoString() string { return s.String() } @@ -33328,12 +38180,20 @@ type SelectParameters struct { OutputSerialization *OutputSerialization `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectParameters) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectParameters) GoString() string { return s.String() } @@ -33386,8 +38246,12 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec // Describes the default server-side encryption to apply to new objects in the // bucket. If a PUT Object request doesn't specify any server-side encryption, -// this default encryption will be applied. For more information, see PUT Bucket -// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// this default encryption will be applied. If you don't specify a customer +// managed key at configuration, Amazon S3 automatically creates an Amazon Web +// Services KMS key in your Amazon Web Services account the first time that +// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon +// S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) // in the Amazon S3 API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` @@ -33397,9 +38261,9 @@ type ServerSideEncryptionByDefault struct { // and only if SSEAlgorithm is set to aws:kms. // // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. 
- // However, if you are using encryption with cross-account operations, you must - // use a fully qualified KMS key ARN. For more information, see Using encryption - // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // However, if you are using encryption with cross-account or Amazon Web Services + // service operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). // // For example: // @@ -33410,6 +38274,10 @@ type ServerSideEncryptionByDefault struct { // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSMasterKeyID is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's + // String and GoString methods. KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -33418,12 +38286,20 @@ type ServerSideEncryptionByDefault struct { SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } @@ -33464,12 +38340,20 @@ type ServerSideEncryptionConfiguration struct { Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } @@ -33522,12 +38406,20 @@ type ServerSideEncryptionRule struct { BucketKeyEnabled *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionRule) GoString() string { return s.String() } @@ -33563,8 +38455,8 @@ func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryp // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using -// a customer master key (CMK) stored in Amazon Web Services Key Management -// Service (SSE-KMS). +// a customer managed key stored in Amazon Web Services Key Management Service +// (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -33585,12 +38477,20 @@ type SourceSelectionCriteria struct { SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SourceSelectionCriteria) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SourceSelectionCriteria) GoString() string { return s.String() } @@ -33640,12 +38540,20 @@ type SseKmsEncryptedObjects struct { Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SseKmsEncryptedObjects) GoString() string { return s.String() } @@ -33683,12 +38591,20 @@ type Stats struct { BytesScanned *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Stats) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Stats) GoString() string { return s.String() } @@ -33719,12 +38635,20 @@ type StatsEvent struct { Details *Stats `locationName:"Details" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StatsEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StatsEvent) GoString() string { return s.String() } @@ -33775,12 +38699,20 @@ type StorageClassAnalysis struct { DataExport *StorageClassAnalysisDataExport `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysis) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysis) GoString() string { return s.String() } @@ -33822,12 +38754,20 @@ type StorageClassAnalysisDataExport struct { OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysisDataExport) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysisDataExport) GoString() string { return s.String() } @@ -33880,12 +38820,20 @@ type Tag struct { Value *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) GoString() string { return s.String() } @@ -33931,12 +38879,20 @@ type Tagging struct { TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tagging) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tagging) GoString() string { return s.String() } @@ -33971,6 +38927,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { } // Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see Permissions server access +// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. type TargetGrant struct { _ struct{} `type:"structure"` @@ -33981,12 +38942,20 @@ type TargetGrant struct { Permission *string `type:"string" enum:"BucketLogsPermission"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TargetGrant) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TargetGrant) GoString() string { return s.String() } @@ -34041,12 +39010,20 @@ type Tiering struct { Days *int64 `type:"integer" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tiering) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tiering) GoString() string { return s.String() } @@ -34090,7 +39067,7 @@ type TopicConfiguration struct { // in the Amazon S3 User Guide. 
// // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) @@ -34108,12 +39085,20 @@ type TopicConfiguration struct { TopicArn *string `locationName:"Topic" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfiguration) GoString() string { return s.String() } @@ -34172,7 +39157,7 @@ type TopicConfigurationDeprecated struct { Event *string `deprecated:"true" type:"string" enum:"Event"` // A collection of events related to objects - Events []*string `locationName:"Event" type:"list" flattened:"true"` + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. @@ -34183,12 +39168,20 @@ type TopicConfigurationDeprecated struct { Topic *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfigurationDeprecated) GoString() string { return s.String() } @@ -34236,12 +39229,20 @@ type Transition struct { StorageClass *string `type:"string" enum:"TransitionStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Transition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Transition) GoString() string { return s.String() } @@ -34279,9 +39280,9 @@ type UploadPartCopyInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -34294,7 +39295,7 @@ type UploadPartCopyInput struct { // * For objects not accessed through an access point, specify the name of // the source bucket and key of the source object, separated by a slash (/). // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, - // use awsexamplebucket/reports/january.pdf. The value must be URL encoded. + // use awsexamplebucket/reports/january.pdf. The value must be URL-encoded. // // * For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the @@ -34310,7 +39311,7 @@ type UploadPartCopyInput struct { // For example, to copy the object reports/january.pdf through outpost my-outpost // owned by account 123456789012 in Region us-west-2, use the URL encoding // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - // The value must be URL encoded. + // The value must be URL-encoded. // // To copy a specific version of an object, append ?versionId= to // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). @@ -34347,6 +39348,10 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34355,13 +39360,13 @@ type UploadPartCopyInput struct { CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request will fail with an HTTP - // 403 (Access Denied) error. + // bucket is owned by a different account, the request fails with the HTTP status + // code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The account ID of the expected source bucket owner. 
If the source bucket - // is owned by a different account, the request will fail with an HTTP 403 (Access - // Denied) error. + // is owned by a different account, the request fails with the HTTP status code + // 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -34377,8 +39382,8 @@ type UploadPartCopyInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -34392,6 +39397,10 @@ type UploadPartCopyInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34405,12 +39414,20 @@ type UploadPartCopyInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyInput) GoString() string { return s.String() } @@ -34637,8 +39654,12 @@ type UploadPartCopyOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyOutput's + // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -34646,12 +39667,20 @@ type UploadPartCopyOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyOutput) GoString() string { return s.String() } @@ -34722,14 +39751,62 @@ type UploadPartInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // This checksum algorithm must be the same for all parts and it match the checksum + // value supplied in the CreateMultipartUpload request. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. 
This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` @@ -34740,8 +39817,8 @@ type UploadPartInput struct { ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -34757,8 +39834,8 @@ type UploadPartInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -34772,6 +39849,10 @@ type UploadPartInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. 
This must be the same encryption key specified in the initiate multipart // upload request. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34785,12 +39866,20 @@ type UploadPartInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartInput) GoString() string { return s.String() } @@ -34842,6 +39931,36 @@ func (s *UploadPartInput) getBucket() (v string) { return *s.Bucket } +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *UploadPartInput) SetChecksumAlgorithm(v string) *UploadPartInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *UploadPartInput) SetChecksumCRC32(v string) *UploadPartInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *UploadPartInput) SetChecksumCRC32C(v string) *UploadPartInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartInput) SetChecksumSHA1(v string) *UploadPartInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartInput) SetChecksumSHA256(v string) *UploadPartInput { + s.ChecksumSHA256 = &v + return s +} + // SetContentLength sets the ContentLength field's value. func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { s.ContentLength = &v @@ -34943,6 +40062,38 @@ type UploadPartOutput struct { // encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. With multipart uploads, this + // may not be a checksum value of the object. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` @@ -34961,8 +40112,12 @@ type UploadPartOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key was used for the + // object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -34970,12 +40125,20 @@ type UploadPartOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartOutput) GoString() string { return s.String() } @@ -34986,6 +40149,30 @@ func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. 
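The ChecksumAlgorithm documentation above notes that the AWS SDK for Go v1 does not compute the request-payload checksum itself: when that header is set, the caller must populate the matching checksum member. Below is a minimal sketch under assumed bucket, key, and upload-ID values, supplying a base64-encoded SHA-256 digest for one part.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	part := []byte("part payload") // placeholder data
	sum := sha256.Sum256(part)

	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:            aws.String("example-bucket"),    // placeholder
		Key:               aws.String("example-key"),       // placeholder
		UploadId:          aws.String("example-upload-id"), // placeholder
		PartNumber:        aws.Int64(1),
		Body:              bytes.NewReader(part),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
		ChecksumSHA256:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
	// S3 echoes the verified digest back on the response.
	log.Println(aws.StringValue(out.ChecksumSHA256))
}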
+func (s *UploadPartOutput) SetChecksumCRC32(v string) *UploadPartOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *UploadPartOutput) SetChecksumCRC32C(v string) *UploadPartOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartOutput) SetChecksumSHA1(v string) *UploadPartOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartOutput) SetChecksumSHA256(v string) *UploadPartOutput { + s.ChecksumSHA256 = &v + return s +} + // SetETag sets the ETag field's value. func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { s.ETag = &v @@ -35037,12 +40224,20 @@ type VersioningConfiguration struct { Status *string `type:"string" enum:"BucketVersioningStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s VersioningConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s VersioningConfiguration) GoString() string { return s.String() } @@ -35078,12 +40273,20 @@ type WebsiteConfiguration struct { RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WebsiteConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WebsiteConfiguration) GoString() string { return s.String() } @@ -35168,6 +40371,58 @@ type WriteGetObjectResponseInput struct { // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"` + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32 checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. 
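The checksum members echoed on UploadPartOutput above are normally carried forward when the multipart upload is completed. The sketch below shows that step, assuming s3.CompletedPart in this SDK version exposes the matching checksum members (not shown in this diff); bucket, key, upload-ID, ETag, and digest values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// completeWithChecksums carries each part's verified SHA-256 digest (echoed on
// UploadPartOutput) into the CompleteMultipartUpload request.
func completeWithChecksums(svc *s3.S3, bucket, key, uploadID string, etags, sums []*string) error {
	parts := make([]*s3.CompletedPart, 0, len(etags))
	for i := range etags {
		parts = append(parts, &s3.CompletedPart{
			PartNumber:     aws.Int64(int64(i + 1)),
			ETag:           etags[i],
			ChecksumSHA256: sums[i], // assumed CompletedPart member in this SDK version
		})
	}
	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	return err
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Placeholder identifiers; in practice the ETags and digests come from the
	// UploadPartOutput returned for each part.
	etags := []*string{aws.String("etag-of-part-1")}
	sums := []*string{aws.String("base64-sha256-of-part-1")}
	if err := completeWithChecksums(svc, "example-bucket", "example-key", "example-upload-id", etags, sums); err != nil {
		log.Fatal(err)
	}
}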
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32C checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 160-bit SHA-1 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 256-bit SHA-256 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha256" type:"string"` + // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` @@ -35199,8 +40454,8 @@ type WriteGetObjectResponseInput struct { // A string that uniquely identifies an error condition. Returned in the // tag of the error XML response for a corresponding GetObject call. Cannot // be used with a successful StatusCode header or when the transformed object - // is provided in the body. All error codes from S3 are sentence-cased. Regex - // value is "^[A-Z][a-zA-Z]+$". + // is provided in the body. All error codes from S3 are sentence-cased. The + // regular expression (regex) value is "^[A-Z][a-zA-Z]+$". 
ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"` // Contains a generic description of the error condition. Returned in the @@ -35209,9 +40464,10 @@ type WriteGetObjectResponseInput struct { // is provided in body. ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"` - // If object stored in Amazon S3 expiration is configured (see PUT Bucket lifecycle) - // it includes expiry-date and rule-id key-value pairs providing object expiration - // information. The value of the rule-id is URL encoded. + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // that provide the object expiration information. The value of the rule-id + // is URL-encoded. Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. @@ -35276,8 +40532,12 @@ type WriteGetObjectResponseInput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for stored in Amazon S3 object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // stored in Amazon S3 object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing requested object in @@ -35318,7 +40578,10 @@ type WriteGetObjectResponseInput struct { // * 503 - Service Unavailable StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` - // The class of storage used to store object in Amazon S3. + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -35328,12 +40591,20 @@ type WriteGetObjectResponseInput struct { VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s WriteGetObjectResponseInput) GoString() string { return s.String() } @@ -35381,6 +40652,30 @@ func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectR return s } +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA256 = &v + return s +} + // SetContentDisposition sets the ContentDisposition field's value. func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { s.ContentDisposition = &v @@ -35583,12 +40878,20 @@ type WriteGetObjectResponseOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
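WriteGetObjectResponseInput above gains forwarding headers for the four checksum algorithms; an S3 Object Lambda function uses this call to return the transformed object. Below is a minimal sketch, assuming the route and token are taken from the invoking event's getObjectContext and that only one checksum header is supplied per request, as the documentation above requires.

package main

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// respond streams a transformed object back through an S3 Object Lambda access
// point, forwarding a single checksum header for the new body.
func respond(route, token, body, bodySHA256 string) error {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute:   aws.String(route), // getObjectContext.outputRoute from the event
		RequestToken:   aws.String(token), // getObjectContext.outputToken from the event
		StatusCode:     aws.Int64(200),
		Body:           aws.ReadSeekCloser(strings.NewReader(body)),
		ChecksumSHA256: aws.String(bodySHA256), // only one checksum header per request
	})
	return err
}

func main() {
	// Placeholder values; in practice these come from the Object Lambda event.
	_ = respond("example-route", "example-token", "transformed body", "base64-encoded-sha256")
}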
func (s WriteGetObjectResponseOutput) GoString() string { return s.String() } @@ -35805,6 +41108,42 @@ func BucketVersioningStatus_Values() []string { } } +const ( + // ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32 = "CRC32" + + // ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32c = "CRC32C" + + // ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha1 = "SHA1" + + // ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha256 = "SHA256" +) + +// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum +func ChecksumAlgorithm_Values() []string { + return []string{ + ChecksumAlgorithmCrc32, + ChecksumAlgorithmCrc32c, + ChecksumAlgorithmSha1, + ChecksumAlgorithmSha256, + } +} + +const ( + // ChecksumModeEnabled is a ChecksumMode enum value + ChecksumModeEnabled = "ENABLED" +) + +// ChecksumMode_Values returns all elements of the ChecksumMode enum +func ChecksumMode_Values() []string { + return []string{ + ChecksumModeEnabled, + } +} + const ( // CompressionTypeNone is a CompressionType enum value CompressionTypeNone = "NONE" @@ -35911,6 +41250,36 @@ const ( // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" + + // EventS3ObjectRestoreDelete is a Event enum value + EventS3ObjectRestoreDelete = "s3:ObjectRestore:Delete" + + // EventS3LifecycleTransition is a Event enum value + EventS3LifecycleTransition = "s3:LifecycleTransition" + + // EventS3IntelligentTiering is a Event enum value + EventS3IntelligentTiering = "s3:IntelligentTiering" + + // EventS3ObjectAclPut is a Event enum value + EventS3ObjectAclPut = "s3:ObjectAcl:Put" + + // EventS3LifecycleExpiration is a Event enum value + EventS3LifecycleExpiration = "s3:LifecycleExpiration:*" + + // EventS3LifecycleExpirationDelete is a Event enum value + EventS3LifecycleExpirationDelete = "s3:LifecycleExpiration:Delete" + + // EventS3LifecycleExpirationDeleteMarkerCreated is a Event enum value + EventS3LifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated" + + // EventS3ObjectTagging is a Event enum value + EventS3ObjectTagging = "s3:ObjectTagging:*" + + // EventS3ObjectTaggingPut is a Event enum value + EventS3ObjectTaggingPut = "s3:ObjectTagging:Put" + + // EventS3ObjectTaggingDelete is a Event enum value + EventS3ObjectTaggingDelete = "s3:ObjectTagging:Delete" ) // Event_Values returns all elements of the Event enum @@ -35933,6 +41302,16 @@ func Event_Values() []string { EventS3ReplicationOperationNotTracked, EventS3ReplicationOperationMissedThreshold, EventS3ReplicationOperationReplicatedAfterThreshold, + EventS3ObjectRestoreDelete, + EventS3LifecycleTransition, + EventS3IntelligentTiering, + EventS3ObjectAclPut, + EventS3LifecycleExpiration, + EventS3LifecycleExpirationDelete, + EventS3LifecycleExpirationDeleteMarkerCreated, + EventS3ObjectTagging, + EventS3ObjectTaggingPut, + EventS3ObjectTaggingDelete, } } @@ -36136,6 +41515,9 @@ const ( // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" + + // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value + InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" ) // InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum @@ -36153,6 +41535,7 @@ func 
InventoryOptionalField_Values() []string { InventoryOptionalFieldObjectLockLegalHoldStatus, InventoryOptionalFieldIntelligentTieringAccessTier, InventoryOptionalFieldBucketKeyStatus, + InventoryOptionalFieldChecksumAlgorithm, } } @@ -36236,6 +41619,34 @@ func MetricsStatus_Values() []string { } } +const ( + // ObjectAttributesEtag is a ObjectAttributes enum value + ObjectAttributesEtag = "ETag" + + // ObjectAttributesChecksum is a ObjectAttributes enum value + ObjectAttributesChecksum = "Checksum" + + // ObjectAttributesObjectParts is a ObjectAttributes enum value + ObjectAttributesObjectParts = "ObjectParts" + + // ObjectAttributesStorageClass is a ObjectAttributes enum value + ObjectAttributesStorageClass = "StorageClass" + + // ObjectAttributesObjectSize is a ObjectAttributes enum value + ObjectAttributesObjectSize = "ObjectSize" +) + +// ObjectAttributes_Values returns all elements of the ObjectAttributes enum +func ObjectAttributes_Values() []string { + return []string{ + ObjectAttributesEtag, + ObjectAttributesChecksum, + ObjectAttributesObjectParts, + ObjectAttributesStorageClass, + ObjectAttributesObjectSize, + } +} + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -36340,12 +41751,21 @@ func ObjectLockRetentionMode_Values() []string { // // ObjectWriter - The uploading account will own the object if the object is // uploaded with the bucket-owner-full-control canned ACL. +// +// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer +// affect permissions. The bucket owner automatically owns and has full control +// over every object in the bucket. The bucket only accepts PUT requests that +// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control +// canned ACL or an equivalent form of this ACL expressed in the XML format. const ( // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" // ObjectOwnershipObjectWriter is a ObjectOwnership enum value ObjectOwnershipObjectWriter = "ObjectWriter" + + // ObjectOwnershipBucketOwnerEnforced is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerEnforced = "BucketOwnerEnforced" ) // ObjectOwnership_Values returns all elements of the ObjectOwnership enum @@ -36353,6 +41773,7 @@ func ObjectOwnership_Values() []string { return []string{ ObjectOwnershipBucketOwnerPreferred, ObjectOwnershipObjectWriter, + ObjectOwnershipBucketOwnerEnforced, } } @@ -36380,6 +41801,9 @@ const ( // ObjectStorageClassOutposts is a ObjectStorageClass enum value ObjectStorageClassOutposts = "OUTPOSTS" + + // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value + ObjectStorageClassGlacierIr = "GLACIER_IR" ) // ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum @@ -36393,6 +41817,7 @@ func ObjectStorageClass_Values() []string { ObjectStorageClassIntelligentTiering, ObjectStorageClassDeepArchive, ObjectStorageClassOutposts, + ObjectStorageClassGlacierIr, } } @@ -36584,8 +42009,8 @@ func RequestCharged_Values() []string { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
For information -// about downloading objects from requester pays buckets, see Downloading Objects -// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// about downloading objects from Requester Pays buckets, see Downloading Objects +// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. const ( // RequestPayerRequester is a RequestPayer enum value @@ -36667,6 +42092,9 @@ const ( // StorageClassOutposts is a StorageClass enum value StorageClassOutposts = "OUTPOSTS" + + // StorageClassGlacierIr is a StorageClass enum value + StorageClassGlacierIr = "GLACIER_IR" ) // StorageClass_Values returns all elements of the StorageClass enum @@ -36680,6 +42108,7 @@ func StorageClass_Values() []string { StorageClassGlacier, StorageClassDeepArchive, StorageClassOutposts, + StorageClassGlacierIr, } } @@ -36746,6 +42175,9 @@ const ( // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" + + // TransitionStorageClassGlacierIr is a TransitionStorageClass enum value + TransitionStorageClassGlacierIr = "GLACIER_IR" ) // TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum @@ -36756,6 +42188,7 @@ func TransitionStorageClass_Values() []string { TransitionStorageClassOnezoneIa, TransitionStorageClassIntelligentTiering, TransitionStorageClassDeepArchive, + TransitionStorageClassGlacierIr, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index ce87ab3..229606b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -1,7 +1,9 @@ package s3 import ( + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/s3shared/arn" "github.com/aws/aws-sdk-go/internal/s3shared/s3err" @@ -13,6 +15,14 @@ func init() { } func defaultInitClientFn(c *client.Client) { + if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset { + if aws.BoolValue(c.Config.UseDualStack) { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + } else { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled + } + } + // Support building custom endpoints based on config c.Handlers.Build.PushFront(endpointHandler) @@ -40,7 +50,7 @@ func defaultInitRequestFn(r *request.Request) { // Auto-populate LocationConstraint with current region r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: - r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError) r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) case opPutObject, opUploadPart: r.Handlers.Build.PushBack(computeBodyHashes) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index ba1a84d..f11bd9b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -3,6 +3,7 @@ package s3 import ( "fmt" "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/aws/endpoints" "net/url" "strings" @@ -155,7 +156,7 @@ func endpointHandler(req *request.Request) { } case arn.OutpostAccessPointARN: // outposts does not support FIPS regions - if resReq.UseFIPS() { + if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled { req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) return @@ -202,7 +203,7 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { // DualStack not supported - if aws.BoolValue(req.Config.UseDualStack) { + if isUseDualStackEndpoint(req) { return s3shared.NewClientConfiguredForDualStackError(accessPoint, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } @@ -233,7 +234,7 @@ func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint a } // Dualstack not supported - if aws.BoolValue(req.Config.UseDualStack) { + if isUseDualStackEndpoint(req) { return s3shared.NewClientConfiguredForDualStackError(accessPoint, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } @@ -258,7 +259,7 @@ func removeBucketFromPath(u *url.URL) { func buildWriteGetObjectResponseEndpoint(req *request.Request) { // DualStack not supported - if aws.BoolValue(req.Config.UseDualStack) { + if isUseDualStackEndpoint(req) { req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) return } @@ -273,7 +274,7 @@ func buildWriteGetObjectResponseEndpoint(req *request.Request) { signingRegion := req.ClientInfo.SigningRegion if !hasCustomEndpoint(req) { - endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), EndpointsID) + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID) if err != nil { req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) return @@ -289,3 +290,10 @@ func buildWriteGetObjectResponseEndpoint(req *request.Request) { redirectSigner(req, signingName, signingRegion) } + +func isUseDualStackEndpoint(req *request.Request) bool { + if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled + } + return aws.BoolValue(req.Config.UseDualStack) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go index 71e9c9e..7583be6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -41,23 +41,11 @@ type accessPointEndpointBuilder arn.AccessPointARN func (a accessPointEndpointBuilder) build(req *request.Request) error { resolveService := arn.AccessPointARN(a).Service resolveRegion := arn.AccessPointARN(a).Region - cfgRegion := aws.StringValue(req.Config.Region) - - if s3shared.IsFIPS(cfgRegion) { - if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { - // FIPS with cross region is not supported, the SDK must fail - // because there is no well defined method for SDK to construct a - // correct FIPS endpoint. 
- return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, nil) - } - resolveRegion = cfgRegion - } - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService) + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService) if err != nil { return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, err) + req.ClientInfo.PartitionID, resolveRegion, err) } endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) @@ -107,23 +95,11 @@ type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN // func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region - cfgRegion := aws.StringValue(req.Config.Region) - - if s3shared.IsFIPS(cfgRegion) { - if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { - // FIPS with cross region is not supported, the SDK must fail - // because there is no well defined method for SDK to construct a - // correct FIPS endpoint. - return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.S3ObjectLambdaAccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, nil) - } - resolveRegion = cfgRegion - } - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, EndpointsID) + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID) if err != nil { return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, err) + req.ClientInfo.PartitionID, resolveRegion, err) } endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) @@ -181,7 +157,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { endpointsID = "s3" } - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID) if err != nil { return s3shared.NewFailedToResolveEndpointError(o, req.ClientInfo.PartitionID, resolveRegion, err) @@ -220,11 +196,16 @@ func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]st } } -func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { +func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) { return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint + opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + opts.ResolvedRegion = resolvedRegion + opts.Logger = r.Config.Logger + opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) }) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index 1e32fb9..d2eeca9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -244,6 +244,10 @@ type S3API interface { GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) 
(*s3.GetObjectAclOutput, error) GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) + GetObjectAttributesWithContext(aws.Context, *s3.GetObjectAttributesInput, ...request.Option) (*s3.GetObjectAttributesOutput, error) + GetObjectAttributesRequest(*s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput) + GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go index 9fa98fa..47d29bb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -124,6 +124,14 @@ func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) { // The Uploader structure that calls Upload(). It is safe to call Upload() // on this structure for multiple objects and across concurrent goroutines. // Mutating the Uploader's properties is not safe to be done concurrently. +// +// The ContentMD5 member for pre-computed MD5 checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, the +// ContentMD5 will be used. +// +// The Checksum members for pre-computed checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, will +// include the checksum member in the request. type Uploader struct { // The buffer size (in bytes) to use when buffering data into chunks and // sending them as parts to S3. The minimum allowed part size is 5MB, and diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 1a6a703..1cd115f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -11,6 +11,14 @@ import ( // to an object in an Amazon S3 bucket. This type is similar to the s3 // package's PutObjectInput with the exception that the Body member is an // io.Reader instead of an io.ReadSeeker. +// +// The ContentMD5 member for pre-computed MD5 checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, the +// ContentMD5 will be used. +// +// The Checksum members for pre-computed checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, will +// include the checksum member in the request. type UploadInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` @@ -35,9 +43,9 @@ type UploadInput struct { // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action using S3 on Outposts through the Amazon Web Services SDKs, + // using this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -57,6 +65,51 @@ type UploadInput struct { // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not + // using the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with + // the HTTP status code 400 Bad Request. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. 
For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + // Specifies presentational information for the object. For more information, // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` @@ -76,6 +129,9 @@ type UploadInput struct { // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end // integrity check. For more information about REST request authentication, // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + // + // If the ContentMD5 is provided for a multipart upload, it will be ignored. + // Objects that will be uploaded in a single part, the ContentMD5 will be used. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the contents. For more information, @@ -83,8 +139,8 @@ type UploadInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request will fail with an HTTP 403 (Access Denied) - // error. + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, @@ -132,8 +188,8 @@ type UploadInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from requester pays buckets, see Downloading Objects - // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` @@ -160,12 +216,12 @@ type UploadInput struct { // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed customer master key - // (CMK) that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, - // but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 - // uses the Amazon Web Services managed CMK in Amazon Web Services to protect - // the data. If the KMS key does not exist in the same account issuing the command, - // you must use the full ARN and not just the ID. + // (Amazon Web Services KMS) symmetrical customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms, but + // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses + // the Amazon Web Services managed key to protect the data. 
If the KMS key does + // not exist in the same account issuing the command, you must use the full + // ARN and not just the ID. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 1b78b5d..9486f24 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -51,22 +51,23 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "s3" } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 { svc := &S3{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2006-03-01", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2006-03-01", + ResolvedRegion: resolvedRegion, }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 247770e..096adc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -11,16 +11,21 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkio" ) -func copyMultipartStatusOKUnmarhsalError(r *request.Request) { +func copyMultipartStatusOKUnmarshalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() if err != nil { r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, "unable to read response body", err), r.HTTPResponse.StatusCode, r.RequestID, ) + // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body + // even in case of an error, so we replace it with an empty Reader. + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil)) return } + body := bytes.NewReader(b) r.HTTPResponse.Body = ioutil.NopCloser(body) defer body.Seek(0, sdkio.SeekStart) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go index 4498f28..948f060 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -513,12 +513,20 @@ type AccountInfo struct { EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s AccountInfo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccountInfo) GoString() string { return s.String() } @@ -542,12 +550,16 @@ func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { } type GetRoleCredentialsInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -562,12 +574,20 @@ type GetRoleCredentialsInput struct { RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsInput) GoString() string { return s.String() } @@ -616,12 +636,20 @@ type GetRoleCredentialsOutput struct { RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsOutput) GoString() string { return s.String() } @@ -641,12 +669,20 @@ type InvalidRequestException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s InvalidRequestException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InvalidRequestException) GoString() string { return s.String() } @@ -690,12 +726,16 @@ func (s *InvalidRequestException) RequestID() string { } type ListAccountRolesInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountRolesInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -712,12 +752,20 @@ type ListAccountRolesInput struct { NextToken *string `location:"querystring" locationName:"next_token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesInput) GoString() string { return s.String() } @@ -776,12 +824,20 @@ type ListAccountRolesOutput struct { RoleList []*RoleInfo `locationName:"roleList" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesOutput) GoString() string { return s.String() } @@ -799,12 +855,16 @@ func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOut } type ListAccountsInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. 
// + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -816,12 +876,20 @@ type ListAccountsInput struct { NextToken *string `location:"querystring" locationName:"next_token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsInput) GoString() string { return s.String() } @@ -871,12 +939,20 @@ type ListAccountsOutput struct { NextToken *string `locationName:"nextToken" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsOutput) GoString() string { return s.String() } @@ -894,22 +970,34 @@ func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { } type LogoutInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by LogoutInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s LogoutInput) GoString() string { return s.String() } @@ -937,12 +1025,20 @@ type LogoutOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutOutput) GoString() string { return s.String() } @@ -955,12 +1051,20 @@ type ResourceNotFoundException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ResourceNotFoundException) GoString() string { return s.String() } @@ -1020,20 +1124,36 @@ type RoleCredentials struct { // The key that is used to sign the request. For more information, see Using // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` // The token used for temporary credentials. For more information, see Using // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleCredentials) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s RoleCredentials) GoString() string { return s.String() } @@ -1073,12 +1193,20 @@ type RoleInfo struct { RoleName *string `locationName:"roleName" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleInfo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleInfo) GoString() string { return s.String() } @@ -1104,12 +1232,20 @@ type TooManyRequestsException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TooManyRequestsException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TooManyRequestsException) GoString() string { return s.String() } @@ -1161,12 +1297,20 @@ type UnauthorizedException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UnauthorizedException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UnauthorizedException) GoString() string { return s.String() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go index 3517533..7a28dc7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go @@ -52,22 +52,23 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "awsssoportal" } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO { svc := &SSO{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2019-06-10", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 3cffd53..f1a7bfd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -63,14 +63,15 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // or for cross-account access. For a comparison of AssumeRole with other API // operations that produce temporary credentials, see Requesting Temporary Security // Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // Permissions // // The temporary security credentials created by AssumeRole can be used to make // API calls to any Amazon Web Services service with the following exception: -// You cannot call the STS GetFederationToken or GetSessionToken API operations. +// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -86,26 +87,33 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role from a different account, your account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when -// the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. +// When you create a role, you create two policies: A role trust policy that +// specifies who can assume the role and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal who is +// allowed to assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. 
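
The reworded AssumeRole comments above and below describe the trust-policy side of the permission model; for orientation, a minimal sketch of the call itself from the v1 SDK (the role ARN and session name are placeholders, not taken from this change):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		// Per the DurationSeconds notes further down, role chaining caps this at one hour.
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary key:", aws.StringValue(out.Credentials.AccessKeyId))
}
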
// // A user who wants to access a role in a different account must also have permissions // that are delegated from the user account administrator. The administrator // must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: +// of the role in the other account. // -// * Attach a policy to the user (identical to the previous user in a different -// account). +// To allow a user to assume a role in the same account, you can do either of +// the following: +// +// * Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). // // * Add the user as a principal directly in the role's trust policy. // -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the +// same account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // // Tags @@ -170,7 +178,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" @@ -258,7 +266,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Services access without user-specific credentials or configuration. For a // comparison of AssumeRoleWithSAML with the other API operations that produce // temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of @@ -403,7 +411,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
// // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" @@ -499,8 +507,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // Returns a set of temporary security credentials for users who have been authenticated // in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. +// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID +// Connect-compatible identity provider such as Google or Amazon Cognito federated +// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). // // For mobile applications, we recommend that you use Amazon Cognito. You can // use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide @@ -523,7 +532,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access @@ -661,7 +670,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" @@ -776,10 +785,11 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // code. // // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation +// contain privileged information that the user who requested the operation // should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. +// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) +// action. 
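
Since the updated comment above calls out the sts:DecodeAuthorizationMessage permission, here is a short sketch of decoding such a message with the v1 SDK; the encoded string is a placeholder standing in for the blob attached to an access-denied error:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Placeholder: copied from the encoded authorization failure message of a 403 response.
	encoded := "EXAMPLE-ENCODED-MESSAGE"

	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DecodedMessage))
}
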
// // The decoded message includes the following type of information: // @@ -1065,7 +1075,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // You can create a mobile-based or browser-based app that can authenticate @@ -1088,9 +1098,9 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the Amazon Web Services account root user credentials have a maximum +// duration of 3,600 seconds (1 hour). // // Permissions // @@ -1141,63 +1151,6 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: -// -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. 
-// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// // An administrator must grant you the permissions necessary to pass session // tags. The administrator can also create granular permissions to allow you // to pass only specific session tags. For more information, see Tutorial: Using @@ -1234,7 +1187,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" @@ -1323,7 +1276,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // then the API returns an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose +// of the sts:GetSessionToken operation is to authenticate the user using MFA. +// You cannot use policies to control authentication operations. 
For more information, +// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. // // Session Duration @@ -1404,15 +1363,23 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken type AssumeRoleInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value specified can can - // range from 900 seconds (15 minutes) up to the maximum session duration that - // is set for the role. The maximum session duration setting can have a value - // from 1 hour to 12 hours. If you specify a value higher than this setting - // or the administrator setting (whichever is lower), the operation fails. For - // example, if you specify a session duration of 12 hours, but your administrator - // set the maximum session duration to 6 hours, your operation fails. To learn - // how to view the maximum value for your role, see View the Maximum Session - // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1422,7 +1389,7 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1548,7 +1515,7 @@ type AssumeRoleInput struct { // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. 
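
The session-tag documentation being retitled here, together with the TransitiveTagKeys field below, can be exercised like this — a hypothetical helper with placeholder tag names, not part of this change:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// withSessionTags attaches a placeholder session tag and marks it transitive,
// so roles assumed further down a chain inherit it.
func withSessionTags(in *sts.AssumeRoleInput) *sts.AssumeRoleInput {
	in.Tags = []*sts.Tag{
		{Key: aws.String("Project"), Value: aws.String("example")}, // placeholder tag
	}
	in.TransitiveTagKeys = []*string{aws.String("Project")}
	return in
}
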
For more information about session tags, - // see Tagging STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // This parameter is optional. You can pass up to 50 session tags. The plaintext @@ -1577,7 +1544,7 @@ type AssumeRoleInput struct { // the new session inherits any transitive session tags from the calling session. // If you pass a session tag with the same key as an inherited tag, the operation // fails. To view the inherited tags for a session, see the CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []*Tag `type:"list"` @@ -1605,12 +1572,20 @@ type AssumeRoleInput struct { TransitiveTagKeys []*string `type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleInput) GoString() string { return s.String() } @@ -1785,12 +1760,20 @@ type AssumeRoleOutput struct { SourceIdentity *string `min:"2" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleOutput) GoString() string { return s.String() } @@ -1842,7 +1825,7 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. 
DurationSeconds *int64 `min:"900" type:"integer"` @@ -1917,12 +1900,20 @@ type AssumeRoleWithSAMLInput struct { SAMLAssertion *string `min:"4" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLInput) GoString() string { return s.String() } @@ -2089,12 +2080,20 @@ type AssumeRoleWithSAMLOutput struct { SubjectType *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLOutput) GoString() string { return s.String() } @@ -2173,7 +2172,7 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -2228,11 +2227,12 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` - // The fully qualified host component of the domain name of the identity provider. + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. // // Do not specify this value for OpenID Connect ID tokens. 
ProviderId *string `min:"4" type:"string"` @@ -2264,12 +2264,20 @@ type AssumeRoleWithWebIdentityInput struct { WebIdentityToken *string `min:"4" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityInput) GoString() string { return s.String() } @@ -2432,12 +2440,20 @@ type AssumeRoleWithWebIdentityOutput struct { SubjectFromWebIdentityToken *string `min:"6" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityOutput) GoString() string { return s.String() } @@ -2505,12 +2521,20 @@ type AssumedRoleUser struct { AssumedRoleId *string `min:"2" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumedRoleUser) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumedRoleUser) GoString() string { return s.String() } @@ -2552,12 +2576,20 @@ type Credentials struct { SessionToken *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Credentials) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Credentials) GoString() string { return s.String() } @@ -2595,12 +2627,20 @@ type DecodeAuthorizationMessageInput struct { EncodedMessage *string `min:"1" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageInput) GoString() string { return s.String() } @@ -2633,16 +2673,24 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` - // An XML document that contains the decoded message. + // The API returns a response with the decoded message. DecodedMessage *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageOutput) GoString() string { return s.String() } @@ -2672,12 +2720,20 @@ type FederatedUser struct { FederatedUserId *string `min:"2" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FederatedUser) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FederatedUser) GoString() string { return s.String() } @@ -2706,12 +2762,20 @@ type GetAccessKeyInfoInput struct { AccessKeyId *string `min:"16" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoInput) GoString() string { return s.String() } @@ -2745,12 +2809,20 @@ type GetAccessKeyInfoOutput struct { Account *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoOutput) GoString() string { return s.String() } @@ -2765,12 +2837,20 @@ type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityInput) GoString() string { return s.String() } @@ -2794,12 +2874,20 @@ type GetCallerIdentityOutput struct { UserId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityOutput) GoString() string { return s.String() } @@ -2949,12 +3037,20 @@ type GetFederationTokenInput struct { Tags []*Tag `type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenInput) GoString() string { return s.String() } @@ -3057,12 +3153,20 @@ type GetFederationTokenOutput struct { PackedPolicySize *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenOutput) GoString() string { return s.String() } @@ -3101,8 +3205,8 @@ type GetSessionTokenInput struct { // user has a policy that requires MFA authentication. The value is either the // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the Management Console - // and viewing the user's security credentials. + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can @@ -3120,12 +3224,20 @@ type GetSessionTokenInput struct { TokenCode *string `min:"6" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenInput) GoString() string { return s.String() } @@ -3181,12 +3293,20 @@ type GetSessionTokenOutput struct { Credentials *Credentials `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetSessionTokenOutput) GoString() string { return s.String() } @@ -3209,12 +3329,20 @@ type PolicyDescriptorType struct { Arn *string `locationName:"arn" min:"20" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyDescriptorType) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyDescriptorType) GoString() string { return s.String() } @@ -3240,9 +3368,9 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging STS Sessions -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in -// the IAM User Guide. +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. type Tag struct { _ struct{} `type:"structure"` @@ -3267,12 +3395,20 @@ type Tag struct { Value *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) GoString() string { return s.String() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 7897d70..b680bbd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -67,7 +67,7 @@ const ( // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index d34a685..f324ff1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -48,22 +48,27 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS { svc := &STS{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2011-06-15", + ResolvedRegion: resolvedRegion, }, handlers, ), diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod deleted file mode 100644 index 4d448e8..0000000 --- a/vendor/github.com/jmespath/go-jmespath/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jmespath/go-jmespath - -go 1.14 - -require github.com/jmespath/go-jmespath/internal/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum deleted file mode 100644 index d2db411..0000000 --- a/vendor/github.com/jmespath/go-jmespath/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md index 746621f..e455133 100644 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ b/vendor/github.com/mattn/go-sqlite3/README.md 
@@ -7,17 +7,17 @@ go-sqlite3 [![codecov](https://codecov.io/gh/mattn/go-sqlite3/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-sqlite3) [![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3) -Latest stable version is v1.14 or later not v2. +Latest stable version is v1.14 or later, not v2. ~~**NOTE:** The increase to v2 was an accident. There were no major changes or features.~~ # Description -sqlite3 driver conforming to the built-in database/sql interface +A sqlite3 driver that conforms to the built-in database/sql interface. -Supported Golang version: See [.github/workflows/go.yaml](./.github/workflows/go.yaml) +Supported Golang version: See [.github/workflows/go.yaml](./.github/workflows/go.yaml). -[This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy) +This package follows the official [Golang Release Policy](https://golang.org/doc/devel/release.html#policy). ### Overview @@ -64,7 +64,7 @@ Supported Golang version: See [.github/workflows/go.yaml](./.github/workflows/go # Installation -This package can be installed with the go get command: +This package can be installed with the `go get` command: go get github.com/mattn/go-sqlite3 @@ -72,28 +72,28 @@ _go-sqlite3_ is *cgo* package. If you want to build your app using go-sqlite3, you need gcc. However, after you have built and installed _go-sqlite3_ with `go install github.com/mattn/go-sqlite3` (which requires gcc), you can build your app without relying on gcc in future. -***Important: because this is a `CGO` enabled package you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compile present within your path.*** +***Important: because this is a `CGO` enabled package, you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compiler present within your path.*** # API Reference -API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3 +API documentation can be found [here](http://godoc.org/github.com/mattn/go-sqlite3). -Examples can be found under the [examples](./_example) directory +Examples can be found under the [examples](./_example) directory. # Connection String When creating a new SQLite database or connection to an existing one, with the file name additional options can be given. -This is also known as a DSN string. (Data Source Name). +This is also known as a DSN (Data Source Name) string. Options are append after the filename of the SQLite database. -The database filename and options are seperated by an `?` (Question Mark). +The database filename and options are separated by a `?` (Question Mark). Options should be URL-encoded (see [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)). This also applies when using an in-memory database instead of a file. Options can be given using the following format: `KEYWORD=VALUE` and multiple options can be combined with the `&` ampersand. -This library supports dsn options of SQLite itself and provides additional options. +This library supports DSN options of SQLite itself and provides additional options. Boolean values can be one of: * `0` `no` `false` `off` @@ -138,19 +138,18 @@ file:test.db?cache=shared&mode=memory This package allows additional configuration of features available within SQLite3 to be enabled or disabled by golang build constraints also known as build `tags`.
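To make the connection-string format above concrete, here is a minimal Go sketch of opening a database with DSN options (the `test.db` filename is a placeholder; `cache=shared` and `mode=rwc` are standard SQLite URI parameters of the `KEYWORD=VALUE` form, and the driver-specific `_` options from this README can be appended the same way):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Placeholder file name; options follow the filename after a `?` and are
	// combined with `&`, as described in the Connection String section.
	dsn := "file:test.db?cache=shared&mode=rwc"

	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open does not touch the file; Ping forces a real connection so any
	// problem with the DSN surfaces here rather than on the first query.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```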
-[Click here for more information about build tags / constraints.](https://golang.org/pkg/go/build/#hdr-Build_Constraints) +Click [here](https://golang.org/pkg/go/build/#hdr-Build_Constraints) for more information about build tags / constraints. ### Usage -If you wish to build this library with additional extensions / features. -Use the following command. +If you wish to build this library with additional extensions / features, use the following command: ```bash go build --tags "" ``` -For available features see the extension list. -When using multiple build tags, all the different tags should be space delimted. +For available features, see the extension list. +When using multiple build tags, all the different tags should be space delimited. Example: @@ -181,9 +180,9 @@ go build --tags "icu json1 fts5 secure_delete" # Compilation -This package requires `CGO_ENABLED=1` ennvironment variable if not set by default, and the presence of the `gcc` compiler. +This package requires the `CGO_ENABLED=1` environment variable if not set by default, and the presence of the `gcc` compiler. -If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package. Then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables. +If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package, then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables. ## Android @@ -198,7 +197,7 @@ For more information see [#201](https://github.com/mattn/go-sqlite3/issues/201) # ARM -To compile for `ARM` use the following environment. +To compile for `ARM`, use the following environment: ```bash env CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ \ @@ -234,7 +233,7 @@ Please work only with compiled final binaries. ## Linux -To compile this package on Linux you must install the development tools for your linux distribution. +To compile this package on Linux, you must install the development tools for your Linux distribution. To compile under linux use the build tag `linux`. @@ -250,7 +249,7 @@ go build --tags "libsqlite3 linux" ### Alpine -When building in an `alpine` container run the following command before building. +When building in an `alpine` container, run the following command before building: ``` apk add --update gcc musl-dev @@ -270,29 +269,29 @@ sudo apt-get install build-essential ## Mac OSX -OSX should have all the tools present to compile this package, if not install XCode this will add all the developers tools. +OSX should have all the tools present to compile this package. If not, install XCode to add all the developer tools. -Required dependency +Required dependency: ```bash brew install sqlite3 ``` -For OSX there is an additional package install which is required if you wish to build the `icu` extension. +For OSX, there is an additional package to install which is required if you wish to build the `icu` extension. -This additional package can be installed with `homebrew`. +This additional package can be installed with `homebrew`: ```bash brew upgrade icu4c ``` -To compile for Mac OSX. +To compile for Mac OSX: ```bash go build --tags "darwin" ``` -If you wish to link directly to libsqlite3 then you can use the `libsqlite3` build tag.
+If you wish to link directly to libsqlite3, use the `libsqlite3` build tag: ``` go build --tags "libsqlite3 darwin" @@ -304,14 +303,14 @@ Additional information: ## Windows -To compile this package on Windows OS you must have the `gcc` compiler installed. +To compile this package on Windows, you must have the `gcc` compiler installed. 1) Install a Windows `gcc` toolchain. -2) Add the `bin` folders to the Windows path if the installer did not do this by default. -3) Open a terminal for the TDM-GCC toolchain, can be found in the Windows Start menu. +2) Add the `bin` folder to the Windows path, if the installer did not do this by default. +3) Open a terminal for the TDM-GCC toolchain, which can be found in the Windows Start menu. 4) Navigate to your project folder and run the `go build ...` command for this package. -For example the TDM-GCC Toolchain can be found [here](https://sourceforge.net/projects/tdm-gcc/). +For example, the TDM-GCC Toolchain can be found [here](https://jmeubank.github.io/tdm-gcc/). ## Errors @@ -349,28 +348,28 @@ This package supports the SQLite User Authentication module. ## Compile -To use the User authentication module the package has to be compiled with the tag `sqlite_userauth`. See [Features](#features). +To use the User authentication module, the package has to be compiled with the tag `sqlite_userauth`. See [Features](#features). ## Usage ### Create protected database -To create a database protected by user authentication provide the following argument to the connection string `_auth`. +To create a database protected by user authentication, provide the following argument to the connection string `_auth`. This will enable user authentication within the database. This option however requires two additional arguments: - `_auth_user` - `_auth_pass` -When `_auth` is present on the connection string user authentication will be enabled and the provided user will be created +When `_auth` is present in the connection string, user authentication will be enabled and the provided user will be created as an `admin` user. After initial creation, the parameter `_auth` has no effect anymore and can be omitted from the connection string. -Example connection string: +Example connection strings: -Create an user authentication database with user `admin` and password `admin`. +Create a user authentication database with user `admin` and password `admin`: `file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin` -Create an user authentication database with user `admin` and password `admin` and use `SHA1` for the password encoding. +Create a user authentication database with user `admin` and password `admin` and use `SHA1` for the password encoding: `file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin&_auth_crypt=sha1` @@ -396,11 +395,11 @@ salt this can be configured with `_auth_salt`. ### Restrictions -Operations on the database regarding to user management can only be preformed by an administrator user. +Operations on the database regarding user management can only be performed by an administrator user. ### Support -The user authentication supports two kinds of users +The user authentication supports two kinds of users: - administrators - regular users @@ -411,7 +410,7 @@ User management can be done by directly using the `*SQLiteConn` or by SQL. #### SQL -The following sql functions are available for user management.
+The following sql functions are available for user management: | Function | Arguments | Description | |----------|-----------|-------------| @@ -420,7 +419,7 @@ The following sql functions are available for user management. | `auth_user_change` | username `string`, password `string`, admin `int` | Function to modify an user. Users can change their own password, but only an administrator can change the administrator flag. | | `authUserDelete` | username `string` | Delete an user from the database. Can only be used by an administrator. The current logged in administrator cannot be deleted. This is to make sure their is always an administrator remaining. | -These functions will return an integer. +These functions will return an integer: - 0 (SQLITE_OK) - 23 (SQLITE_AUTH) Failed to perform due to authentication or insufficient privileges @@ -441,7 +440,7 @@ SELECT user_delete('user'); #### *SQLiteConn -The following functions are available for User authentication from the `*SQLiteConn`. +The following functions are available for User authentication from the `*SQLiteConn`: | Function | Description | |----------|-------------| @@ -452,16 +451,16 @@ The following functions are available for User authentication from the `*SQLiteC ### Attached database -When using attached databases. SQLite will use the authentication from the `main` database for the attached database(s). +When using attached databases, SQLite will use the authentication from the `main` database for the attached database(s). # Extensions -If you want your own extension to be listed here or you want to add a reference to an extension; please submit an Issue for this. +If you want your own extension to be listed here, or you want to add a reference to an extension; please submit an Issue for this. ## Spatialite Spatialite is available as an extension to SQLite, and can be used in combination with this repository. -For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatialite). +For an example, see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatialite). ## extension-functions.c from SQLite3 Contrib @@ -471,7 +470,7 @@ extension-functions.c is available as an extension to SQLite, and provides the f - String: replicate, charindex, leftstr, rightstr, ltrim, rtrim, trim, replace, reverse, proper, padl, padr, padc, strfilter. - Aggregate: stdev, variance, mode, median, lower_quartile, upper_quartile -For an example see [dinedal/go-sqlite3-extension-functions](https://github.com/dinedal/go-sqlite3-extension-functions). +For an example, see [dinedal/go-sqlite3-extension-functions](https://github.com/dinedal/go-sqlite3-extension-functions). # FAQ @@ -491,7 +490,7 @@ For an example see [dinedal/go-sqlite3-extension-functions](https://github.com/d - Can I use this in multiple routines concurrently? - Yes for readonly. But, No for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209), [#274](https://github.com/mattn/go-sqlite3/issues/274). + Yes for readonly. But not for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209), [#274](https://github.com/mattn/go-sqlite3/issues/274). - Why I'm getting `no such table` error? 
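As a hedged sketch of the user-authentication options described above (it assumes the driver was built with the `sqlite_userauth` tag; the file name, credentials, and new password are placeholders), connecting with `_auth`, `_auth_user`, and `_auth_pass` and then calling one of the user-management SQL functions from Go could look like this:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // must be built with the sqlite_userauth tag
)

func main() {
	// DSN from the "Create protected database" section; on first use this
	// creates the database and an admin user with the given credentials.
	dsn := "file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin"

	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The user-management helpers are plain SQL functions, so they can be
	// called through database/sql. Per the table above they return an integer:
	// 0 (SQLITE_OK) on success, 23 (SQLITE_AUTH) on insufficient privileges.
	var rc int
	if err := db.QueryRow(`SELECT auth_user_change('admin', 'new-password', 1)`).Scan(&rc); err != nil {
		log.Fatal(err)
	}
	log.Printf("auth_user_change returned %d", rc)
}
```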
@@ -505,7 +504,7 @@ For an example see [dinedal/go-sqlite3-extension-functions](https://github.com/d Note that if the last database connection in the pool closes, the in-memory database is deleted. Make sure the [max idle connection limit](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns) is > 0, and the [connection lifetime](https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime) is infinite. - For more information see + For more information, see: * [#204](https://github.com/mattn/go-sqlite3/issues/204) * [#511](https://github.com/mattn/go-sqlite3/issues/511) * https://www.sqlite.org/sharedcache.html#shared_cache_and_in_memory_databases @@ -515,20 +514,20 @@ For an example see [dinedal/go-sqlite3-extension-functions](https://github.com/d OS X limits OS-wide to not have more than 1000 files open simultaneously by default. - For more information see [#289](https://github.com/mattn/go-sqlite3/issues/289) + For more information, see [#289](https://github.com/mattn/go-sqlite3/issues/289) - Trying to execute a `.` (dot) command throws an error. Error: `Error: near ".": syntax error` - Dot command are part of SQLite3 CLI not of this library. + Dot commands are part of the SQLite3 CLI, not of this library. You need to implement the feature or call the sqlite3 cli. - More information see [#305](https://github.com/mattn/go-sqlite3/issues/305) + For more information, see [#305](https://github.com/mattn/go-sqlite3/issues/305). - Error: `database is locked` - When you get a database is locked. Please use the following options. + When you get a `database is locked` error, please use the following options. Add to DSN: `cache=shared` @@ -537,24 +536,24 @@ For an example see [dinedal/go-sqlite3-extension-functions](https://github.com/d db, err := sql.Open("sqlite3", "file:locked.sqlite?cache=shared") ``` - Second please set the database connections of the SQL package to 1. + Next, please set the maximum number of open database connections to 1: ```go db.SetMaxOpenConns(1) ``` - More information see [#209](https://github.com/mattn/go-sqlite3/issues/209) + For more information, see [#209](https://github.com/mattn/go-sqlite3/issues/209). ## Contributors ### Code Contributors -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. +This project exists thanks to all the people who [contribute](CONTRIBUTING.md). ### Financial Contributors -Become a financial contributor and help us sustain our community. [[Contribute](https://opencollective.com/mattn-go-sqlite3/contribute)] +Become a financial contributor and help us sustain our community. [[Contribute here](https://opencollective.com/mattn-go-sqlite3/contribute)].
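The FAQ advice above about in-memory databases (keep the idle-connection limit above zero and the connection lifetime infinite, otherwise the database disappears when the last pooled connection closes) can be illustrated with a small Go sketch:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Shared-cache in-memory database, as shown in the FAQ.
	db, err := sql.Open("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Keep at least one idle connection and never expire connections by age;
	// if every pooled connection closes, the in-memory database is deleted.
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0) // 0 means connections are never closed due to age

	if _, err := db.Exec(`CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)`); err != nil {
		log.Fatal(err)
	}
}
```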
#### Individuals diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go index e222cc8..ecbb469 100644 --- a/vendor/github.com/mattn/go-sqlite3/backup.go +++ b/vendor/github.com/mattn/go-sqlite3/backup.go @@ -7,7 +7,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index c3ce752..d305691 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -12,7 +12,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif @@ -353,6 +353,20 @@ func callbackRetNil(ctx *C.sqlite3_context, v reflect.Value) error { return nil } +func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error { + if v.IsNil() { + C.sqlite3_result_null(ctx) + return nil + } + + cb, err := callbackRet(v.Elem().Type()) + if err != nil { + return err + } + + return cb(ctx, v.Elem()) +} + func callbackRet(typ reflect.Type) (callbackRetConverter, error) { switch typ.Kind() { case reflect.Interface: @@ -360,6 +374,11 @@ func callbackRet(typ reflect.Type) (callbackRetConverter, error) { if typ.Implements(errorInterface) { return callbackRetNil, nil } + + if typ.NumMethod() == 0 { + return callbackRetGeneric, nil + } + fallthrough case reflect.Slice: if typ.Elem().Kind() != reflect.Uint8 { diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go index 696281c..58ab252 100644 --- a/vendor/github.com/mattn/go-sqlite3/error.go +++ b/vendor/github.com/mattn/go-sqlite3/error.go @@ -7,7 +7,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/go.mod b/vendor/github.com/mattn/go-sqlite3/go.mod deleted file mode 100644 index 3d0854a..0000000 --- a/vendor/github.com/mattn/go-sqlite3/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/mattn/go-sqlite3 - -go 1.12 diff --git a/vendor/github.com/mattn/go-sqlite3/go.sum b/vendor/github.com/mattn/go-sqlite3/go.sum deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index ac5d0b7..102821b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.36.0. By combining all the individual C code files into this +** version 3.38.5. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -23,793 +23,6 @@ #ifndef SQLITE_PRIVATE # define SQLITE_PRIVATE static #endif -/************** Begin file ctime.c *******************************************/ -/* -** 2010 February 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. 
-** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file implements routines used to report what compile-time options -** SQLite was built with. -*/ - -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* IMP: R-16824-07538 */ - -/* -** Include the configuration header output by 'configure' if we're using the -** autoconf-based build -*/ -#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -#include "config.h" -#define SQLITECONFIG_H 1 -#endif - -/* These macros are provided to "stringify" the value of the define -** for those options in which the value is meaningful. */ -#define CTIMEOPT_VAL_(opt) #opt -#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt) - -/* Like CTIMEOPT_VAL, but especially for SQLITE_DEFAULT_LOOKASIDE. This -** option requires a separate macro because legal values contain a single -** comma. e.g. (-DSQLITE_DEFAULT_LOOKASIDE="100,100") */ -#define CTIMEOPT_VAL2_(opt1,opt2) #opt1 "," #opt2 -#define CTIMEOPT_VAL2(opt) CTIMEOPT_VAL2_(opt) - -/* -** An array of names of all compile-time options. This array should -** be sorted A-Z. -** -** This array looks large, but in a typical installation actually uses -** only a handful of compile-time options, so most times this array is usually -** rather short and uses little memory space. -*/ -static const char * const sqlite3azCompileOpt[] = { - -/* -** BEGIN CODE GENERATED BY tool/mkctime.tcl -*/ -#if SQLITE_32BIT_ROWID - "32BIT_ROWID", -#endif -#if SQLITE_4_BYTE_ALIGNED_MALLOC - "4_BYTE_ALIGNED_MALLOC", -#endif -#if SQLITE_64BIT_STATS - "64BIT_STATS", -#endif -#ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN -# if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 - "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN), -# endif -#endif -#if SQLITE_ALLOW_URI_AUTHORITY - "ALLOW_URI_AUTHORITY", -#endif -#ifdef SQLITE_BITMASK_TYPE - "BITMASK_TYPE=" CTIMEOPT_VAL(SQLITE_BITMASK_TYPE), -#endif -#if SQLITE_BUG_COMPATIBLE_20160819 - "BUG_COMPATIBLE_20160819", -#endif -#if SQLITE_CASE_SENSITIVE_LIKE - "CASE_SENSITIVE_LIKE", -#endif -#if SQLITE_CHECK_PAGES - "CHECK_PAGES", -#endif -#if defined(__clang__) && defined(__clang_major__) - "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "." - CTIMEOPT_VAL(__clang_minor__) "." 
- CTIMEOPT_VAL(__clang_patchlevel__), -#elif defined(_MSC_VER) - "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER), -#elif defined(__GNUC__) && defined(__VERSION__) - "COMPILER=gcc-" __VERSION__, -#endif -#if SQLITE_COVERAGE_TEST - "COVERAGE_TEST", -#endif -#if SQLITE_DEBUG - "DEBUG", -#endif -#if SQLITE_DEFAULT_AUTOMATIC_INDEX - "DEFAULT_AUTOMATIC_INDEX", -#endif -#if SQLITE_DEFAULT_AUTOVACUUM - "DEFAULT_AUTOVACUUM", -#endif -#ifdef SQLITE_DEFAULT_CACHE_SIZE - "DEFAULT_CACHE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_CACHE_SIZE), -#endif -#if SQLITE_DEFAULT_CKPTFULLFSYNC - "DEFAULT_CKPTFULLFSYNC", -#endif -#ifdef SQLITE_DEFAULT_FILE_FORMAT - "DEFAULT_FILE_FORMAT=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_FORMAT), -#endif -#ifdef SQLITE_DEFAULT_FILE_PERMISSIONS - "DEFAULT_FILE_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_PERMISSIONS), -#endif -#if SQLITE_DEFAULT_FOREIGN_KEYS - "DEFAULT_FOREIGN_KEYS", -#endif -#ifdef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT - "DEFAULT_JOURNAL_SIZE_LIMIT=" CTIMEOPT_VAL(SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT), -#endif -#ifdef SQLITE_DEFAULT_LOCKING_MODE - "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE), -#endif -#ifdef SQLITE_DEFAULT_LOOKASIDE - "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL2(SQLITE_DEFAULT_LOOKASIDE), -#endif -#ifdef SQLITE_DEFAULT_MEMSTATUS -# if SQLITE_DEFAULT_MEMSTATUS != 1 - "DEFAULT_MEMSTATUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_MEMSTATUS), -# endif -#endif -#ifdef SQLITE_DEFAULT_MMAP_SIZE - "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), -#endif -#ifdef SQLITE_DEFAULT_PAGE_SIZE - "DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_PAGE_SIZE), -#endif -#ifdef SQLITE_DEFAULT_PCACHE_INITSZ - "DEFAULT_PCACHE_INITSZ=" CTIMEOPT_VAL(SQLITE_DEFAULT_PCACHE_INITSZ), -#endif -#ifdef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS - "DEFAULT_PROXYDIR_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_PROXYDIR_PERMISSIONS), -#endif -#if SQLITE_DEFAULT_RECURSIVE_TRIGGERS - "DEFAULT_RECURSIVE_TRIGGERS", -#endif -#ifdef SQLITE_DEFAULT_ROWEST - "DEFAULT_ROWEST=" CTIMEOPT_VAL(SQLITE_DEFAULT_ROWEST), -#endif -#ifdef SQLITE_DEFAULT_SECTOR_SIZE - "DEFAULT_SECTOR_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_SECTOR_SIZE), -#endif -#ifdef SQLITE_DEFAULT_SYNCHRONOUS - "DEFAULT_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_SYNCHRONOUS), -#endif -#ifdef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT - "DEFAULT_WAL_AUTOCHECKPOINT=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_AUTOCHECKPOINT), -#endif -#ifdef SQLITE_DEFAULT_WAL_SYNCHRONOUS - "DEFAULT_WAL_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_SYNCHRONOUS), -#endif -#ifdef SQLITE_DEFAULT_WORKER_THREADS - "DEFAULT_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WORKER_THREADS), -#endif -#if SQLITE_DIRECT_OVERFLOW_READ - "DIRECT_OVERFLOW_READ", -#endif -#if SQLITE_DISABLE_DIRSYNC - "DISABLE_DIRSYNC", -#endif -#if SQLITE_DISABLE_FTS3_UNICODE - "DISABLE_FTS3_UNICODE", -#endif -#if SQLITE_DISABLE_FTS4_DEFERRED - "DISABLE_FTS4_DEFERRED", -#endif -#if SQLITE_DISABLE_INTRINSIC - "DISABLE_INTRINSIC", -#endif -#if SQLITE_DISABLE_LFS - "DISABLE_LFS", -#endif -#if SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS - "DISABLE_PAGECACHE_OVERFLOW_STATS", -#endif -#if SQLITE_DISABLE_SKIPAHEAD_DISTINCT - "DISABLE_SKIPAHEAD_DISTINCT", -#endif -#ifdef SQLITE_ENABLE_8_3_NAMES - "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), -#endif -#if SQLITE_ENABLE_API_ARMOR - "ENABLE_API_ARMOR", -#endif -#if SQLITE_ENABLE_ATOMIC_WRITE - "ENABLE_ATOMIC_WRITE", -#endif -#if SQLITE_ENABLE_BATCH_ATOMIC_WRITE - "ENABLE_BATCH_ATOMIC_WRITE", -#endif -#if SQLITE_ENABLE_BYTECODE_VTAB - "ENABLE_BYTECODE_VTAB", -#endif -#ifdef 
SQLITE_ENABLE_CEROD - "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD), -#endif -#if SQLITE_ENABLE_COLUMN_METADATA - "ENABLE_COLUMN_METADATA", -#endif -#if SQLITE_ENABLE_COLUMN_USED_MASK - "ENABLE_COLUMN_USED_MASK", -#endif -#if SQLITE_ENABLE_COSTMULT - "ENABLE_COSTMULT", -#endif -#if SQLITE_ENABLE_CURSOR_HINTS - "ENABLE_CURSOR_HINTS", -#endif -#if SQLITE_ENABLE_DBPAGE_VTAB - "ENABLE_DBPAGE_VTAB", -#endif -#if SQLITE_ENABLE_DBSTAT_VTAB - "ENABLE_DBSTAT_VTAB", -#endif -#if SQLITE_ENABLE_EXPENSIVE_ASSERT - "ENABLE_EXPENSIVE_ASSERT", -#endif -#if SQLITE_ENABLE_EXPLAIN_COMMENTS - "ENABLE_EXPLAIN_COMMENTS", -#endif -#if SQLITE_ENABLE_FTS3 - "ENABLE_FTS3", -#endif -#if SQLITE_ENABLE_FTS3_PARENTHESIS - "ENABLE_FTS3_PARENTHESIS", -#endif -#if SQLITE_ENABLE_FTS3_TOKENIZER - "ENABLE_FTS3_TOKENIZER", -#endif -#if SQLITE_ENABLE_FTS4 - "ENABLE_FTS4", -#endif -#if SQLITE_ENABLE_FTS5 - "ENABLE_FTS5", -#endif -#if SQLITE_ENABLE_GEOPOLY - "ENABLE_GEOPOLY", -#endif -#if SQLITE_ENABLE_HIDDEN_COLUMNS - "ENABLE_HIDDEN_COLUMNS", -#endif -#if SQLITE_ENABLE_ICU - "ENABLE_ICU", -#endif -#if SQLITE_ENABLE_IOTRACE - "ENABLE_IOTRACE", -#endif -#if SQLITE_ENABLE_JSON1 - "ENABLE_JSON1", -#endif -#if SQLITE_ENABLE_LOAD_EXTENSION - "ENABLE_LOAD_EXTENSION", -#endif -#ifdef SQLITE_ENABLE_LOCKING_STYLE - "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), -#endif -#if SQLITE_ENABLE_MATH_FUNCTIONS - "ENABLE_MATH_FUNCTIONS", -#endif -#if SQLITE_ENABLE_MEMORY_MANAGEMENT - "ENABLE_MEMORY_MANAGEMENT", -#endif -#if SQLITE_ENABLE_MEMSYS3 - "ENABLE_MEMSYS3", -#endif -#if SQLITE_ENABLE_MEMSYS5 - "ENABLE_MEMSYS5", -#endif -#if SQLITE_ENABLE_MULTIPLEX - "ENABLE_MULTIPLEX", -#endif -#if SQLITE_ENABLE_NORMALIZE - "ENABLE_NORMALIZE", -#endif -#if SQLITE_ENABLE_NULL_TRIM - "ENABLE_NULL_TRIM", -#endif -#if SQLITE_ENABLE_OFFSET_SQL_FUNC - "ENABLE_OFFSET_SQL_FUNC", -#endif -#if SQLITE_ENABLE_OVERSIZE_CELL_CHECK - "ENABLE_OVERSIZE_CELL_CHECK", -#endif -#if SQLITE_ENABLE_PREUPDATE_HOOK - "ENABLE_PREUPDATE_HOOK", -#endif -#if SQLITE_ENABLE_QPSG - "ENABLE_QPSG", -#endif -#if SQLITE_ENABLE_RBU - "ENABLE_RBU", -#endif -#if SQLITE_ENABLE_RTREE - "ENABLE_RTREE", -#endif -#if SQLITE_ENABLE_SELECTTRACE - "ENABLE_SELECTTRACE", -#endif -#if SQLITE_ENABLE_SESSION - "ENABLE_SESSION", -#endif -#if SQLITE_ENABLE_SNAPSHOT - "ENABLE_SNAPSHOT", -#endif -#if SQLITE_ENABLE_SORTER_REFERENCES - "ENABLE_SORTER_REFERENCES", -#endif -#if SQLITE_ENABLE_SQLLOG - "ENABLE_SQLLOG", -#endif -#if SQLITE_ENABLE_STAT4 - "ENABLE_STAT4", -#endif -#if SQLITE_ENABLE_STMTVTAB - "ENABLE_STMTVTAB", -#endif -#if SQLITE_ENABLE_STMT_SCANSTATUS - "ENABLE_STMT_SCANSTATUS", -#endif -#if SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION - "ENABLE_UNKNOWN_SQL_FUNCTION", -#endif -#if SQLITE_ENABLE_UNLOCK_NOTIFY - "ENABLE_UNLOCK_NOTIFY", -#endif -#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT - "ENABLE_UPDATE_DELETE_LIMIT", -#endif -#if SQLITE_ENABLE_URI_00_ERROR - "ENABLE_URI_00_ERROR", -#endif -#if SQLITE_ENABLE_VFSTRACE - "ENABLE_VFSTRACE", -#endif -#if SQLITE_ENABLE_WHERETRACE - "ENABLE_WHERETRACE", -#endif -#if SQLITE_ENABLE_ZIPVFS - "ENABLE_ZIPVFS", -#endif -#if SQLITE_EXPLAIN_ESTIMATED_ROWS - "EXPLAIN_ESTIMATED_ROWS", -#endif -#if SQLITE_EXTRA_IFNULLROW - "EXTRA_IFNULLROW", -#endif -#ifdef SQLITE_EXTRA_INIT - "EXTRA_INIT=" CTIMEOPT_VAL(SQLITE_EXTRA_INIT), -#endif -#ifdef SQLITE_EXTRA_SHUTDOWN - "EXTRA_SHUTDOWN=" CTIMEOPT_VAL(SQLITE_EXTRA_SHUTDOWN), -#endif -#ifdef SQLITE_FTS3_MAX_EXPR_DEPTH - "FTS3_MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_FTS3_MAX_EXPR_DEPTH), -#endif -#if 
SQLITE_FTS5_ENABLE_TEST_MI - "FTS5_ENABLE_TEST_MI", -#endif -#if SQLITE_FTS5_NO_WITHOUT_ROWID - "FTS5_NO_WITHOUT_ROWID", -#endif -#if HAVE_ISNAN || SQLITE_HAVE_ISNAN - "HAVE_ISNAN", -#endif -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX -# if SQLITE_HOMEGROWN_RECURSIVE_MUTEX != 1 - "HOMEGROWN_RECURSIVE_MUTEX=" CTIMEOPT_VAL(SQLITE_HOMEGROWN_RECURSIVE_MUTEX), -# endif -#endif -#if SQLITE_IGNORE_AFP_LOCK_ERRORS - "IGNORE_AFP_LOCK_ERRORS", -#endif -#if SQLITE_IGNORE_FLOCK_LOCK_ERRORS - "IGNORE_FLOCK_LOCK_ERRORS", -#endif -#if SQLITE_INLINE_MEMCPY - "INLINE_MEMCPY", -#endif -#if SQLITE_INT64_TYPE - "INT64_TYPE", -#endif -#ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX - "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX), -#endif -#if SQLITE_LIKE_DOESNT_MATCH_BLOBS - "LIKE_DOESNT_MATCH_BLOBS", -#endif -#if SQLITE_LOCK_TRACE - "LOCK_TRACE", -#endif -#if SQLITE_LOG_CACHE_SPILL - "LOG_CACHE_SPILL", -#endif -#ifdef SQLITE_MALLOC_SOFT_LIMIT - "MALLOC_SOFT_LIMIT=" CTIMEOPT_VAL(SQLITE_MALLOC_SOFT_LIMIT), -#endif -#ifdef SQLITE_MAX_ATTACHED - "MAX_ATTACHED=" CTIMEOPT_VAL(SQLITE_MAX_ATTACHED), -#endif -#ifdef SQLITE_MAX_COLUMN - "MAX_COLUMN=" CTIMEOPT_VAL(SQLITE_MAX_COLUMN), -#endif -#ifdef SQLITE_MAX_COMPOUND_SELECT - "MAX_COMPOUND_SELECT=" CTIMEOPT_VAL(SQLITE_MAX_COMPOUND_SELECT), -#endif -#ifdef SQLITE_MAX_DEFAULT_PAGE_SIZE - "MAX_DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_DEFAULT_PAGE_SIZE), -#endif -#ifdef SQLITE_MAX_EXPR_DEPTH - "MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_EXPR_DEPTH), -#endif -#ifdef SQLITE_MAX_FUNCTION_ARG - "MAX_FUNCTION_ARG=" CTIMEOPT_VAL(SQLITE_MAX_FUNCTION_ARG), -#endif -#ifdef SQLITE_MAX_LENGTH - "MAX_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LENGTH), -#endif -#ifdef SQLITE_MAX_LIKE_PATTERN_LENGTH - "MAX_LIKE_PATTERN_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LIKE_PATTERN_LENGTH), -#endif -#ifdef SQLITE_MAX_MEMORY - "MAX_MEMORY=" CTIMEOPT_VAL(SQLITE_MAX_MEMORY), -#endif -#ifdef SQLITE_MAX_MMAP_SIZE - "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE), -#endif -#ifdef SQLITE_MAX_MMAP_SIZE_ - "MAX_MMAP_SIZE_=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE_), -#endif -#ifdef SQLITE_MAX_PAGE_COUNT - "MAX_PAGE_COUNT=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_COUNT), -#endif -#ifdef SQLITE_MAX_PAGE_SIZE - "MAX_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_SIZE), -#endif -#ifdef SQLITE_MAX_SCHEMA_RETRY - "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY), -#endif -#ifdef SQLITE_MAX_SQL_LENGTH - "MAX_SQL_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_SQL_LENGTH), -#endif -#ifdef SQLITE_MAX_TRIGGER_DEPTH - "MAX_TRIGGER_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_TRIGGER_DEPTH), -#endif -#ifdef SQLITE_MAX_VARIABLE_NUMBER - "MAX_VARIABLE_NUMBER=" CTIMEOPT_VAL(SQLITE_MAX_VARIABLE_NUMBER), -#endif -#ifdef SQLITE_MAX_VDBE_OP - "MAX_VDBE_OP=" CTIMEOPT_VAL(SQLITE_MAX_VDBE_OP), -#endif -#ifdef SQLITE_MAX_WORKER_THREADS - "MAX_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_MAX_WORKER_THREADS), -#endif -#if SQLITE_MEMDEBUG - "MEMDEBUG", -#endif -#if SQLITE_MIXED_ENDIAN_64BIT_FLOAT - "MIXED_ENDIAN_64BIT_FLOAT", -#endif -#if SQLITE_MMAP_READWRITE - "MMAP_READWRITE", -#endif -#if SQLITE_MUTEX_NOOP - "MUTEX_NOOP", -#endif -#if SQLITE_MUTEX_OMIT - "MUTEX_OMIT", -#endif -#if SQLITE_MUTEX_PTHREADS - "MUTEX_PTHREADS", -#endif -#if SQLITE_MUTEX_W32 - "MUTEX_W32", -#endif -#if SQLITE_NEED_ERR_NAME - "NEED_ERR_NAME", -#endif -#if SQLITE_NOINLINE - "NOINLINE", -#endif -#if SQLITE_NO_SYNC - "NO_SYNC", -#endif -#if SQLITE_OMIT_ALTERTABLE - "OMIT_ALTERTABLE", -#endif -#if SQLITE_OMIT_ANALYZE - "OMIT_ANALYZE", -#endif -#if SQLITE_OMIT_ATTACH - "OMIT_ATTACH", -#endif -#if 
SQLITE_OMIT_AUTHORIZATION - "OMIT_AUTHORIZATION", -#endif -#if SQLITE_OMIT_AUTOINCREMENT - "OMIT_AUTOINCREMENT", -#endif -#if SQLITE_OMIT_AUTOINIT - "OMIT_AUTOINIT", -#endif -#if SQLITE_OMIT_AUTOMATIC_INDEX - "OMIT_AUTOMATIC_INDEX", -#endif -#if SQLITE_OMIT_AUTORESET - "OMIT_AUTORESET", -#endif -#if SQLITE_OMIT_AUTOVACUUM - "OMIT_AUTOVACUUM", -#endif -#if SQLITE_OMIT_BETWEEN_OPTIMIZATION - "OMIT_BETWEEN_OPTIMIZATION", -#endif -#if SQLITE_OMIT_BLOB_LITERAL - "OMIT_BLOB_LITERAL", -#endif -#if SQLITE_OMIT_CAST - "OMIT_CAST", -#endif -#if SQLITE_OMIT_CHECK - "OMIT_CHECK", -#endif -#if SQLITE_OMIT_COMPLETE - "OMIT_COMPLETE", -#endif -#if SQLITE_OMIT_COMPOUND_SELECT - "OMIT_COMPOUND_SELECT", -#endif -#if SQLITE_OMIT_CONFLICT_CLAUSE - "OMIT_CONFLICT_CLAUSE", -#endif -#if SQLITE_OMIT_CTE - "OMIT_CTE", -#endif -#if defined(SQLITE_OMIT_DATETIME_FUNCS) || defined(SQLITE_OMIT_FLOATING_POINT) - "OMIT_DATETIME_FUNCS", -#endif -#if SQLITE_OMIT_DECLTYPE - "OMIT_DECLTYPE", -#endif -#if SQLITE_OMIT_DEPRECATED - "OMIT_DEPRECATED", -#endif -#if SQLITE_OMIT_DESERIALIZE - "OMIT_DESERIALIZE", -#endif -#if SQLITE_OMIT_DISKIO - "OMIT_DISKIO", -#endif -#if SQLITE_OMIT_EXPLAIN - "OMIT_EXPLAIN", -#endif -#if SQLITE_OMIT_FLAG_PRAGMAS - "OMIT_FLAG_PRAGMAS", -#endif -#if SQLITE_OMIT_FLOATING_POINT - "OMIT_FLOATING_POINT", -#endif -#if SQLITE_OMIT_FOREIGN_KEY - "OMIT_FOREIGN_KEY", -#endif -#if SQLITE_OMIT_GET_TABLE - "OMIT_GET_TABLE", -#endif -#if SQLITE_OMIT_HEX_INTEGER - "OMIT_HEX_INTEGER", -#endif -#if SQLITE_OMIT_INCRBLOB - "OMIT_INCRBLOB", -#endif -#if SQLITE_OMIT_INTEGRITY_CHECK - "OMIT_INTEGRITY_CHECK", -#endif -#if SQLITE_OMIT_INTROSPECTION_PRAGMAS - "OMIT_INTROSPECTION_PRAGMAS", -#endif -#if SQLITE_OMIT_LIKE_OPTIMIZATION - "OMIT_LIKE_OPTIMIZATION", -#endif -#if SQLITE_OMIT_LOAD_EXTENSION - "OMIT_LOAD_EXTENSION", -#endif -#if SQLITE_OMIT_LOCALTIME - "OMIT_LOCALTIME", -#endif -#if SQLITE_OMIT_LOOKASIDE - "OMIT_LOOKASIDE", -#endif -#if SQLITE_OMIT_MEMORYDB - "OMIT_MEMORYDB", -#endif -#if SQLITE_OMIT_OR_OPTIMIZATION - "OMIT_OR_OPTIMIZATION", -#endif -#if SQLITE_OMIT_PAGER_PRAGMAS - "OMIT_PAGER_PRAGMAS", -#endif -#if SQLITE_OMIT_PARSER_TRACE - "OMIT_PARSER_TRACE", -#endif -#if SQLITE_OMIT_POPEN - "OMIT_POPEN", -#endif -#if SQLITE_OMIT_PRAGMA - "OMIT_PRAGMA", -#endif -#if SQLITE_OMIT_PROGRESS_CALLBACK - "OMIT_PROGRESS_CALLBACK", -#endif -#if SQLITE_OMIT_QUICKBALANCE - "OMIT_QUICKBALANCE", -#endif -#if SQLITE_OMIT_REINDEX - "OMIT_REINDEX", -#endif -#if SQLITE_OMIT_SCHEMA_PRAGMAS - "OMIT_SCHEMA_PRAGMAS", -#endif -#if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS - "OMIT_SCHEMA_VERSION_PRAGMAS", -#endif -#if SQLITE_OMIT_SHARED_CACHE - "OMIT_SHARED_CACHE", -#endif -#if SQLITE_OMIT_SHUTDOWN_DIRECTORIES - "OMIT_SHUTDOWN_DIRECTORIES", -#endif -#if SQLITE_OMIT_SUBQUERY - "OMIT_SUBQUERY", -#endif -#if SQLITE_OMIT_TCL_VARIABLE - "OMIT_TCL_VARIABLE", -#endif -#if SQLITE_OMIT_TEMPDB - "OMIT_TEMPDB", -#endif -#if SQLITE_OMIT_TEST_CONTROL - "OMIT_TEST_CONTROL", -#endif -#ifdef SQLITE_OMIT_TRACE -# if SQLITE_OMIT_TRACE != 1 - "OMIT_TRACE=" CTIMEOPT_VAL(SQLITE_OMIT_TRACE), -# endif -#endif -#if SQLITE_OMIT_TRIGGER - "OMIT_TRIGGER", -#endif -#if SQLITE_OMIT_TRUNCATE_OPTIMIZATION - "OMIT_TRUNCATE_OPTIMIZATION", -#endif -#if SQLITE_OMIT_UTF16 - "OMIT_UTF16", -#endif -#if SQLITE_OMIT_VACUUM - "OMIT_VACUUM", -#endif -#if SQLITE_OMIT_VIEW - "OMIT_VIEW", -#endif -#if SQLITE_OMIT_VIRTUALTABLE - "OMIT_VIRTUALTABLE", -#endif -#if SQLITE_OMIT_WAL - "OMIT_WAL", -#endif -#if SQLITE_OMIT_WSD - "OMIT_WSD", -#endif -#if SQLITE_OMIT_XFER_OPT - "OMIT_XFER_OPT", 
-#endif -#if SQLITE_PCACHE_SEPARATE_HEADER - "PCACHE_SEPARATE_HEADER", -#endif -#if SQLITE_PERFORMANCE_TRACE - "PERFORMANCE_TRACE", -#endif -#ifdef SQLITE_POWERSAFE_OVERWRITE -# if SQLITE_POWERSAFE_OVERWRITE != 1 - "POWERSAFE_OVERWRITE=" CTIMEOPT_VAL(SQLITE_POWERSAFE_OVERWRITE), -# endif -#endif -#if SQLITE_PREFER_PROXY_LOCKING - "PREFER_PROXY_LOCKING", -#endif -#if SQLITE_PROXY_DEBUG - "PROXY_DEBUG", -#endif -#if SQLITE_REVERSE_UNORDERED_SELECTS - "REVERSE_UNORDERED_SELECTS", -#endif -#if SQLITE_RTREE_INT_ONLY - "RTREE_INT_ONLY", -#endif -#if SQLITE_SECURE_DELETE - "SECURE_DELETE", -#endif -#if SQLITE_SMALL_STACK - "SMALL_STACK", -#endif -#ifdef SQLITE_SORTER_PMASZ - "SORTER_PMASZ=" CTIMEOPT_VAL(SQLITE_SORTER_PMASZ), -#endif -#if SQLITE_SOUNDEX - "SOUNDEX", -#endif -#ifdef SQLITE_STAT4_SAMPLES - "STAT4_SAMPLES=" CTIMEOPT_VAL(SQLITE_STAT4_SAMPLES), -#endif -#ifdef SQLITE_STMTJRNL_SPILL - "STMTJRNL_SPILL=" CTIMEOPT_VAL(SQLITE_STMTJRNL_SPILL), -#endif -#if SQLITE_SUBSTR_COMPATIBILITY - "SUBSTR_COMPATIBILITY", -#endif -#if (!defined(SQLITE_WIN32_MALLOC) \ - && !defined(SQLITE_ZERO_MALLOC) \ - && !defined(SQLITE_MEMDEBUG) \ - ) || defined(SQLITE_SYSTEM_MALLOC) - "SYSTEM_MALLOC", -#endif -#if SQLITE_TCL - "TCL", -#endif -#ifdef SQLITE_TEMP_STORE - "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE), -#endif -#if SQLITE_TEST - "TEST", -#endif -#if defined(SQLITE_THREADSAFE) - "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE), -#elif defined(THREADSAFE) - "THREADSAFE=" CTIMEOPT_VAL(THREADSAFE), -#else - "THREADSAFE=1", -#endif -#if SQLITE_UNLINK_AFTER_CLOSE - "UNLINK_AFTER_CLOSE", -#endif -#if SQLITE_UNTESTABLE - "UNTESTABLE", -#endif -#if SQLITE_USER_AUTHENTICATION - "USER_AUTHENTICATION", -#endif -#if SQLITE_USE_ALLOCA - "USE_ALLOCA", -#endif -#if SQLITE_USE_FCNTL_TRACE - "USE_FCNTL_TRACE", -#endif -#if SQLITE_USE_URI - "USE_URI", -#endif -#if SQLITE_VDBE_COVERAGE - "VDBE_COVERAGE", -#endif -#if SQLITE_WIN32_MALLOC - "WIN32_MALLOC", -#endif -#if SQLITE_ZERO_MALLOC - "ZERO_MALLOC", -#endif -/* -** END CODE GENERATED BY tool/mkctime.tcl -*/ -}; - -SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){ - *pnOpt = sizeof(sqlite3azCompileOpt) / sizeof(sqlite3azCompileOpt[0]); - return (const char**)sqlite3azCompileOpt; -} - -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ - -/************** End of ctime.c ***********************************************/ /************** Begin file sqliteInt.h ***************************************/ /* ** 2001 September 15 @@ -1075,6 +288,17 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){ # define _USE_32BIT_TIME_T #endif +/* Optionally #include a user-defined header, whereby compilation options +** may be set prior to where they take effect, but after platform setup. +** If SQLITE_CUSTOM_INCLUDE=? is defined, its value names the #include +** file. +*/ +#ifdef SQLITE_CUSTOM_INCLUDE +# define INC_STRINGIFY_(f) #f +# define INC_STRINGIFY(f) INC_STRINGIFY_(f) +# include INC_STRINGIFY(SQLITE_CUSTOM_INCLUDE) +#endif + /* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear ** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for ** MinGW. @@ -1126,7 +350,30 @@ extern "C" { /* -** Provide the ability to override linkage features of the interface. +** Facilitate override of interface linkage and calling conventions. +** Be aware that these macros may not be used within this particular +** translation of the amalgamation and its associated header file. 
+** +** The SQLITE_EXTERN and SQLITE_API macros are used to instruct the +** compiler that the target identifier should have external linkage. +** +** The SQLITE_CDECL macro is used to set the calling convention for +** public functions that accept a variable number of arguments. +** +** The SQLITE_APICALL macro is used to set the calling convention for +** public functions that accept a fixed number of arguments. +** +** The SQLITE_STDCALL macro is no longer used and is now deprecated. +** +** The SQLITE_CALLBACK macro is used to set the calling convention for +** function pointers. +** +** The SQLITE_SYSAPI macro is used to set the calling convention for +** functions provided by the operating system. +** +** Currently, the SQLITE_CDECL, SQLITE_APICALL, SQLITE_CALLBACK, and +** SQLITE_SYSAPI macros are used only when building for environments +** that require non-default calling conventions. */ #ifndef SQLITE_EXTERN # define SQLITE_EXTERN extern @@ -1206,9 +453,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.36.0" -#define SQLITE_VERSION_NUMBER 3036000 -#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5" +#define SQLITE_VERSION "3.38.5" +#define SQLITE_VERSION_NUMBER 3038005 +#define SQLITE_SOURCE_ID "2022-05-06 15:25:27 78d9c993d404cdfaa7fdd2973fa1052e3da9f66215cff9c5540ebe55c407d9fe" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1620,12 +867,13 @@ SQLITE_API int sqlite3_exec( #define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) #define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8)) +#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) -#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) +#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) /* internal use only */ /* ** CAPI3REF: Flags For File Open Operations @@ -1633,6 +881,19 @@ SQLITE_API int sqlite3_exec( ** These bit values are intended for use in the ** 3rd parameter to the [sqlite3_open_v2()] interface and ** in the 4th parameter to the [sqlite3_vfs.xOpen] method. +** +** Only those flags marked as "Ok for sqlite3_open_v2()" may be +** used as the third argument to the [sqlite3_open_v2()] interface. +** The other flags have historically been ignored by sqlite3_open_v2(), +** though future versions of SQLite might change so that an error is +** raised if any of the disallowed bits are passed into sqlite3_open_v2(). +** Applications should not depend on the historical behavior. +** +** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into +** [sqlite3_open_v2()] does *not* cause the underlying database file +** to be opened using O_EXCL. Passing SQLITE_OPEN_EXCLUSIVE into +** [sqlite3_open_v2()] has historically be a no-op and might become an +** error in future versions of SQLite. 
*/ #define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ @@ -1655,6 +916,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ #define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_EXRESCODE 0x02000000 /* Extended result codes */ /* Reserved: 0x00F00000 */ /* Legacy compatibility: */ @@ -3547,11 +2809,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** CAPI3REF: Count The Number Of Rows Modified ** METHOD: sqlite3 ** -** ^This function returns the number of rows modified, inserted or +** ^These functions return the number of rows modified, inserted or ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. -** ^Executing any other type of SQL statement does not modify the value -** returned by this function. +** The two functions are identical except for the type of the return value +** and that if the number of rows modified by the most recent INSERT, UPDATE +** or DELETE is greater than the maximum value supported by type "int", then +** the return value of sqlite3_changes() is undefined. ^Executing any other +** type of SQL statement does not modify the value returned by these functions. ** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], @@ -3600,16 +2865,21 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** */ SQLITE_API int sqlite3_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_changes64(sqlite3*); /* ** CAPI3REF: Total Number Of Rows Modified ** METHOD: sqlite3 ** -** ^This function returns the total number of rows inserted, modified or +** ^These functions return the total number of rows inserted, modified or ** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed ** since the database connection was opened, including those executed as -** part of trigger programs. ^Executing any other type of SQL statement -** does not affect the value returned by sqlite3_total_changes(). +** part of trigger programs. The two functions are identical except for the +** type of the return value and that if the number of rows modified by the +** connection exceeds the maximum value supported by type "int", then +** the return value of sqlite3_total_changes() is undefined. ^Executing +** any other type of SQL statement does not affect the value returned by +** sqlite3_total_changes(). ** ** ^Changes made as part of [foreign key actions] are included in the ** count, but those made as part of REPLACE constraint resolution are @@ -3637,6 +2907,7 @@ SQLITE_API int sqlite3_changes(sqlite3*); ** */ SQLITE_API int sqlite3_total_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*); /* ** CAPI3REF: Interrupt A Long-Running Query @@ -4466,6 +3737,14 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** +** [[OPEN_EXRESCODE]] ^(
[SQLITE_OPEN_EXRESCODE]
+**
The database connection comes up in "extended result code mode". +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database +** connection as soon as the connection is created. In addition to setting +** the extended result code mode, this flag also causes [sqlite3_open_v2()] +** to return an extended result code.
+** ** [[OPEN_NOFOLLOW]] ^(
[SQLITE_OPEN_NOFOLLOW]
**
The database filename is not allowed to be a symbolic link
** )^ @@ -4473,7 +3752,15 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** If the 3rd parameter to sqlite3_open_v2() is not one of the ** required combinations shown above optionally combined with other ** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. +** then the behavior is undefined. Historic versions of SQLite +** have silently ignored surplus bits in the flags parameter to +** sqlite3_open_v2(), however that behavior might not be carried through +** into future versions of SQLite and so applications should not rely +** upon it. Note in particular that the SQLITE_OPEN_EXCLUSIVE flag is a no-op +** for sqlite3_open_v2(). The SQLITE_OPEN_EXCLUSIVE does *not* cause +** the open to fail if the database already exists. The SQLITE_OPEN_EXCLUSIVE +** flag is intended for use by the [sqlite3_vfs|VFS interface] only, and not +** by sqlite3_open_v2(). ** ** ^The fourth parameter to sqlite3_open_v2() is the name of the ** [sqlite3_vfs] object that defines the operating system interface that @@ -4844,13 +4131,14 @@ SQLITE_API void sqlite3_free_filename(char*); ** sqlite3_extended_errcode() might change with each API call. ** Except, there are some interfaces that are guaranteed to never ** change the value of the error code. The error-code preserving -** interfaces are: +** interfaces include the following: ** **
    **
  • sqlite3_errcode() **
  • sqlite3_extended_errcode() **
  • sqlite3_errmsg() **
  • sqlite3_errmsg16() +**
  • sqlite3_error_offset() **
** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language @@ -4865,6 +4153,13 @@ SQLITE_API void sqlite3_free_filename(char*); ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** +** ^If the most recent error references a specific token in the input +** SQL, the sqlite3_error_offset() interface returns the byte offset +** of the start of that token. ^The byte offset returned by +** sqlite3_error_offset() assumes that the input SQL is UTF8. +** ^If the most recent error does not reference a specific token in the input +** SQL, then the sqlite3_error_offset() function returns -1. +** ** When the serialized [threading mode] is in use, it might be the ** case that a second error occurs on a separate thread in between ** the time of the first error and the call to these interfaces. @@ -4884,6 +4179,7 @@ SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); SQLITE_API const char *sqlite3_errmsg(sqlite3*); SQLITE_API const void *sqlite3_errmsg16(sqlite3*); SQLITE_API const char *sqlite3_errstr(int); +SQLITE_API int sqlite3_error_offset(sqlite3 *db); /* ** CAPI3REF: Prepared Statement Object @@ -5241,12 +4537,17 @@ SQLITE_API int sqlite3_prepare16_v3( ** are managed by SQLite and are automatically freed when the prepared ** statement is finalized. ** ^The string returned by sqlite3_expanded_sql(P), on the other hand, -** is obtained from [sqlite3_malloc()] and must be free by the application +** is obtained from [sqlite3_malloc()] and must be freed by the application ** by passing it to [sqlite3_free()]. +** +** ^The sqlite3_normalized_sql() interface is only available if +** the [SQLITE_ENABLE_NORMALIZE] compile-time option is defined. */ SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); +#ifdef SQLITE_ENABLE_NORMALIZE SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); +#endif /* ** CAPI3REF: Determine If An SQL Statement Writes The Database @@ -5290,6 +4591,10 @@ SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); ** be false. ^Similarly, a CREATE TABLE IF NOT EXISTS statement is a ** read-only no-op if the table already exists, but ** sqlite3_stmt_readonly() still returns false for such a statement. +** +** ^If prepared statement X is an [EXPLAIN] or [EXPLAIN QUERY PLAN] +** statement, then sqlite3_stmt_readonly(X) returns the same value as +** if the EXPLAIN or EXPLAIN QUERY PLAN prefix were omitted. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -5358,6 +4663,8 @@ SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); ** ** ^The sqlite3_value objects that are passed as parameters into the ** implementation of [application-defined SQL functions] are protected. +** ^The sqlite3_value objects returned by [sqlite3_vtab_rhs_value()] +** are protected. ** ^The sqlite3_value object returned by ** [sqlite3_column_value()] is unprotected. ** Unprotected sqlite3_value objects may only be used as arguments @@ -5979,6 +5286,10 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** even empty strings, are always zero-terminated. ^The return ** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. ** +** ^Strings returned by sqlite3_column_text16() always have the endianness +** which is native to the platform, regardless of the text encoding set +** for the database. +** ** Warning: ^The object returned by [sqlite3_column_value()] is an ** [unprotected sqlite3_value] object. 
In a multithreaded environment, ** an unprotected sqlite3_value object may only be used safely with @@ -5992,7 +5303,7 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** [application-defined SQL functions] or [virtual tables], not within ** top-level application code. ** -** The these routines may attempt to convert the datatype of the result. +** These routines may attempt to convert the datatype of the result. ** ^For example, if the internal representation is FLOAT and a text result ** is requested, [sqlite3_snprintf()] is used internally to perform the ** conversion automatically. ^(The following table details the conversions @@ -6017,7 +5328,7 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** TEXT BLOB No change ** BLOB INTEGER [CAST] to INTEGER ** BLOB FLOAT [CAST] to REAL -** BLOB TEXT Add a zero terminator if needed +** BLOB TEXT [CAST] to TEXT, ensure zero terminator ** ** )^ ** @@ -7430,6 +6741,72 @@ SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); +/* +** CAPI3REF: Autovacuum Compaction Amount Callback +** METHOD: sqlite3 +** +** ^The sqlite3_autovacuum_pages(D,C,P,X) interface registers a callback +** function C that is invoked prior to each autovacuum of the database +** file. ^The callback is passed a copy of the generic data pointer (P), +** the schema-name of the attached database that is being autovacuumed, +** the the size of the database file in pages, the number of free pages, +** and the number of bytes per page, respectively. The callback should +** return the number of free pages that should be removed by the +** autovacuum. ^If the callback returns zero, then no autovacuum happens. +** ^If the value returned is greater than or equal to the number of +** free pages, then a complete autovacuum happens. +** +**

^If there are multiple ATTACH-ed database files that are being +** modified as part of a transaction commit, then the autovacuum pages +** callback is invoked separately for each file. +** +**

The callback is not reentrant. The callback function should +** not attempt to invoke any other SQLite interface. If it does, bad +** things may happen, including segmentation faults and corrupt database +** files. The callback function should be a simple function that +** does some arithmetic on its input parameters and returns a result. +** +** ^The X parameter to sqlite3_autovacuum_pages(D,C,P,X) is an optional +** destructor for the P parameter. ^If X is not NULL, then X(P) is +** invoked whenever the database connection closes or when the callback +** is overwritten by another invocation of sqlite3_autovacuum_pages(). +** +**

^There is only one autovacuum pages callback per database connection. +** ^Each call to the sqlite3_autovacuum_pages() interface overrides all +** previous invocations for that database connection. ^If the callback +** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, +** then the autovacuum steps callback is cancelled. The return value +** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might +** be some other error code if something goes wrong. The current +** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other +** return codes might be added in future releases. +** +**

If no autovacuum pages callback is specified (the usual case) or +** a NULL pointer is provided for the callback, +** then the default behavior is to vacuum all free pages. So, in other +** words, the default behavior is the same as if the callback function +** were something like this: +** +**

+**     unsigned int demonstration_autovac_pages_callback(
+**       void *pClientData,
+**       const char *zSchema,
+**       unsigned int nDbPage,
+**       unsigned int nFreePage,
+**       unsigned int nBytePerPage
+**     ){
+**       return nFreePage;
+**     }
+** 
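For context, a minimal registration sketch for the interface declared below, assuming an open connection db; the callback mirrors the demonstration above, and no client data or destructor is used:

    #include <sqlite3.h>

    /* Same shape as the demonstration callback shown above. */
    static unsigned int demo_autovac_cb(
      void *pClientData, const char *zSchema,
      unsigned int nDbPage, unsigned int nFreePage, unsigned int nBytePerPage
    ){
      return nFreePage;   /* vacuum all free pages, i.e. the default behavior */
    }

    static int register_autovac_hook(sqlite3 *db){
      /* NULL client data and NULL destructor; pass a NULL callback to cancel. */
      return sqlite3_autovacuum_pages(db, demo_autovac_cb, 0, 0);
    }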
+*/ +SQLITE_API int sqlite3_autovacuum_pages( + sqlite3 *db, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, + void(*)(void*) +); + + /* ** CAPI3REF: Data Change Notification Callbacks ** METHOD: sqlite3 @@ -8071,24 +7448,56 @@ struct sqlite3_index_info { ** ** These macros define the allowed values for the ** [sqlite3_index_info].aConstraint[].op field. Each value represents -** an operator that is part of a constraint term in the wHERE clause of +** an operator that is part of a constraint term in the WHERE clause of ** a query that uses a [virtual table]. -*/ -#define SQLITE_INDEX_CONSTRAINT_EQ 2 -#define SQLITE_INDEX_CONSTRAINT_GT 4 -#define SQLITE_INDEX_CONSTRAINT_LE 8 -#define SQLITE_INDEX_CONSTRAINT_LT 16 -#define SQLITE_INDEX_CONSTRAINT_GE 32 -#define SQLITE_INDEX_CONSTRAINT_MATCH 64 -#define SQLITE_INDEX_CONSTRAINT_LIKE 65 -#define SQLITE_INDEX_CONSTRAINT_GLOB 66 -#define SQLITE_INDEX_CONSTRAINT_REGEXP 67 -#define SQLITE_INDEX_CONSTRAINT_NE 68 -#define SQLITE_INDEX_CONSTRAINT_ISNOT 69 -#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 -#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 -#define SQLITE_INDEX_CONSTRAINT_IS 72 -#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150 +** +** ^The left-hand operand of the operator is given by the corresponding +** aConstraint[].iColumn field. ^An iColumn of -1 indicates the left-hand +** operand is the rowid. +** The SQLITE_INDEX_CONSTRAINT_LIMIT and SQLITE_INDEX_CONSTRAINT_OFFSET +** operators have no left-hand operand, and so for those operators the +** corresponding aConstraint[].iColumn is meaningless and should not be +** used. +** +** All operator values from SQLITE_INDEX_CONSTRAINT_FUNCTION through +** value 255 are reserved to represent functions that are overloaded +** by the [xFindFunction|xFindFunction method] of the virtual table +** implementation. +** +** The right-hand operands for each constraint might be accessible using +** the [sqlite3_vtab_rhs_value()] interface. Usually the right-hand +** operand is only available if it appears as a single constant literal +** in the input SQL. If the right-hand operand is another column or an +** expression (even a constant expression) or a parameter, then the +** sqlite3_vtab_rhs_value() probably will not be able to extract it. +** ^The SQLITE_INDEX_CONSTRAINT_ISNULL and +** SQLITE_INDEX_CONSTRAINT_ISNOTNULL operators have no right-hand operand +** and hence calls to sqlite3_vtab_rhs_value() for those operators will +** always return SQLITE_NOTFOUND. +** +** The collating sequence to be used for comparison can be found using +** the [sqlite3_vtab_collation()] interface. For most real-world virtual +** tables, the collating sequence of constraints does not matter (for example +** because the constraints are numeric) and so the sqlite3_vtab_collation() +** interface is no commonly needed. 
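To make the aConstraint[] fields above concrete, a skeletal xBestIndex that consumes one usable equality constraint on column 0 might look roughly like this (a sketch only; the virtual table and its column layout are assumed):

    #include <sqlite3.h>

    static int demoBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *p){
      int i;
      (void)pVTab;
      for(i=0; i<p->nConstraint; i++){
        const struct sqlite3_index_constraint *pC = &p->aConstraint[i];
        if( !pC->usable ) continue;
        if( pC->op==SQLITE_INDEX_CONSTRAINT_EQ && pC->iColumn==0 ){
          p->aConstraintUsage[i].argvIndex = 1;  /* RHS becomes argv[0] of xFilter */
          p->aConstraintUsage[i].omit = 1;       /* SQLite need not re-check it */
          p->idxNum = 1;
          p->estimatedCost = 10.0;
          return SQLITE_OK;
        }
      }
      p->idxNum = 0;                             /* fall back to a full scan */
      p->estimatedCost = 1000000.0;
      return SQLITE_OK;
    }

Within the same loop, sqlite3_vtab_rhs_value(p, i, &pVal) can be used to peek at a literal right-hand operand and sqlite3_vtab_collation(p, i) to check the collating sequence, as described above.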
+*/ +#define SQLITE_INDEX_CONSTRAINT_EQ 2 +#define SQLITE_INDEX_CONSTRAINT_GT 4 +#define SQLITE_INDEX_CONSTRAINT_LE 8 +#define SQLITE_INDEX_CONSTRAINT_LT 16 +#define SQLITE_INDEX_CONSTRAINT_GE 32 +#define SQLITE_INDEX_CONSTRAINT_MATCH 64 +#define SQLITE_INDEX_CONSTRAINT_LIKE 65 +#define SQLITE_INDEX_CONSTRAINT_GLOB 66 +#define SQLITE_INDEX_CONSTRAINT_REGEXP 67 +#define SQLITE_INDEX_CONSTRAINT_NE 68 +#define SQLITE_INDEX_CONSTRAINT_ISNOT 69 +#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 +#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 +#define SQLITE_INDEX_CONSTRAINT_IS 72 +#define SQLITE_INDEX_CONSTRAINT_LIMIT 73 +#define SQLITE_INDEX_CONSTRAINT_OFFSET 74 +#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150 /* ** CAPI3REF: Register A Virtual Table Implementation @@ -8117,7 +7526,7 @@ struct sqlite3_index_info { ** destructor. ** ** ^If the third parameter (the pointer to the sqlite3_module object) is -** NULL then no new module is create and any existing modules with the +** NULL then no new module is created and any existing modules with the ** same name are dropped. ** ** See also: [sqlite3_drop_modules()] @@ -8893,7 +8302,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_SEEK_COUNT 30 #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 -#define SQLITE_TESTCTRL_LAST 32 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_LOGEST 33 +#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -9416,6 +8826,16 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** The counter is incremented on the first [sqlite3_step()] call of each ** cycle. ** +** [[SQLITE_STMTSTATUS_FILTER_MISS]] +** [[SQLITE_STMTSTATUS_FILTER HIT]] +**
SQLITE_STMTSTATUS_FILTER_HIT
+** SQLITE_STMTSTATUS_FILTER_MISS
+**
^SQLITE_STMTSTATUS_FILTER_HIT is the number of times that a join +** step was bypassed because a Bloom filter returned not-found. The +** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of +** times that the Bloom filter returned a find, and thus the join step +** had to be processed as normal. +** ** [[SQLITE_STMTSTATUS_MEMUSED]]
SQLITE_STMTSTATUS_MEMUSED
**
^This is the approximate number of bytes of heap memory ** used to store the prepared statement. ^This value is not actually @@ -9430,6 +8850,8 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); #define SQLITE_STMTSTATUS_VM_STEP 4 #define SQLITE_STMTSTATUS_REPREPARE 5 #define SQLITE_STMTSTATUS_RUN 6 +#define SQLITE_STMTSTATUS_FILTER_MISS 7 +#define SQLITE_STMTSTATUS_FILTER_HIT 8 #define SQLITE_STMTSTATUS_MEMUSED 99 /* @@ -10093,8 +9515,9 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** ** A single database handle may have at most a single write-ahead log callback ** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. ^Note that the -** [sqlite3_wal_autocheckpoint()] interface and the +** previously registered write-ahead log callback. ^The return value is +** a copy of the third parameter from the previous call, if any, or 0. +** ^Note that the [sqlite3_wal_autocheckpoint()] interface and the ** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will ** overwrite any prior [sqlite3_wal_hook()] settings. */ @@ -10397,19 +9820,269 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); /* ** CAPI3REF: Determine The Collation For a Virtual Table Constraint +** METHOD: sqlite3_index_info ** ** This function may only be called from within a call to the [xBestIndex] -** method of a [virtual table]. +** method of a [virtual table]. This function returns a pointer to a string +** that is the name of the appropriate collation sequence to use for text +** comparisons on the constraint identified by its arguments. +** +** The first argument must be the pointer to the [sqlite3_index_info] object +** that is the first parameter to the xBestIndex() method. The second argument +** must be an index into the aConstraint[] array belonging to the +** sqlite3_index_info structure passed to xBestIndex. +** +** Important: +** The first parameter must be the same pointer that is passed into the +** xBestMethod() method. The first parameter may not be a pointer to a +** different [sqlite3_index_info] object, even an exact copy. ** -** The first argument must be the sqlite3_index_info object that is the -** first parameter to the xBestIndex() method. The second argument must be -** an index into the aConstraint[] array belonging to the sqlite3_index_info -** structure passed to xBestIndex. This function returns a pointer to a buffer -** containing the name of the collation sequence for the corresponding -** constraint. +** The return value is computed as follows: +** +**
    +**
  1. If the constraint comes from a WHERE clause expression that contains +** a [COLLATE operator], then the name of the collation specified by +** that COLLATE operator is returned. +**

  2. If there is no COLLATE operator, but the column that is the subject +** of the constraint specifies an alternative collating sequence via +** a [COLLATE clause] on the column definition within the CREATE TABLE +** statement that was passed into [sqlite3_declare_vtab()], then the +** name of that alternative collating sequence is returned. +**

  3. Otherwise, "BINARY" is returned. +**

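A short sketch of how xBestIndex code might act on the rules above, assuming pInfo is the sqlite3_index_info pointer passed to xBestIndex and i is a valid constraint index:

    /* Only consume the i-th constraint when text comparison will use the
    ** BINARY collating sequence, per rules 1-3 above. */
    const char *zColl = sqlite3_vtab_collation(pInfo, i);
    if( zColl && sqlite3_stricmp(zColl, "BINARY")==0 ){
      pInfo->aConstraintUsage[i].argvIndex = 1;
      pInfo->aConstraintUsage[i].omit = 1;
    }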
*/ SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +/* +** CAPI3REF: Determine if a virtual table query is DISTINCT +** METHOD: sqlite3_index_info +** +** This API may only be used from within an [xBestIndex|xBestIndex method] +** of a [virtual table] implementation. The result of calling this +** interface from outside of xBestIndex() is undefined and probably harmful. +** +** ^The sqlite3_vtab_distinct() interface returns an integer that is +** either 0, 1, or 2. The integer returned by sqlite3_vtab_distinct() +** gives the virtual table additional information about how the query +** planner wants the output to be ordered. As long as the virtual table +** can meet the ordering requirements of the query planner, it may set +** the "orderByConsumed" flag. +** +**
  1. +** ^If the sqlite3_vtab_distinct() interface returns 0, that means +** that the query planner needs the virtual table to return all rows in the +** sort order defined by the "nOrderBy" and "aOrderBy" fields of the +** [sqlite3_index_info] object. This is the default expectation. If the +** virtual table outputs all rows in sorted order, then it is always safe for +** the xBestIndex method to set the "orderByConsumed" flag, regardless of +** the return value from sqlite3_vtab_distinct(). +**

  2. +** ^(If the sqlite3_vtab_distinct() interface returns 1, that means +** that the query planner does not need the rows to be returned in sorted order +** as long as all rows with the same values in all columns identified by the +** "aOrderBy" field are adjacent.)^ This mode is used when the query planner +** is doing a GROUP BY. +**

  3. +** ^(If the sqlite3_vtab_distinct() interface returns 2, that means +** that the query planner does not need the rows returned in any particular +** order, as long as rows with the same values in all "aOrderBy" columns +** are adjacent.)^ ^(Furthermore, only a single row for each particular +** combination of values in the columns identified by the "aOrderBy" field +** needs to be returned.)^ ^It is always ok for two or more rows with the same +** values in all "aOrderBy" columns to be returned, as long as all such rows +** are adjacent. ^The virtual table may, if it chooses, omit extra rows +** that have the same value for all columns identified by "aOrderBy". +** ^However omitting the extra rows is optional. +** This mode is used for a DISTINCT query. +**

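A sketch of how xBestIndex might react to the three modes above; pInfo is the sqlite3_index_info pointer, and can_group_adjacent()/can_fully_sort() stand in for table-specific checks that are not shown:

    int eDistinct = sqlite3_vtab_distinct(pInfo);
    if( eDistinct==1 || eDistinct==2 ){
      /* GROUP BY (1) or DISTINCT (2): rows with equal aOrderBy values only
      ** need to be adjacent; full sort order is not required. */
      pInfo->orderByConsumed = can_group_adjacent(pInfo);  /* placeholder */
    }else{
      /* 0: the complete ORDER BY must be honored before claiming consumption. */
      pInfo->orderByConsumed = can_fully_sort(pInfo);      /* placeholder */
    }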
+** +** ^For the purposes of comparing virtual table output values to see if the +** values are same value for sorting purposes, two NULL values are considered +** to be the same. In other words, the comparison operator is "IS" +** (or "IS NOT DISTINCT FROM") and not "==". +** +** If a virtual table implementation is unable to meet the requirements +** specified above, then it must not set the "orderByConsumed" flag in the +** [sqlite3_index_info] object or an incorrect answer may result. +** +** ^A virtual table implementation is always free to return rows in any order +** it wants, as long as the "orderByConsumed" flag is not set. ^When the +** the "orderByConsumed" flag is unset, the query planner will add extra +** [bytecode] to ensure that the final results returned by the SQL query are +** ordered correctly. The use of the "orderByConsumed" flag and the +** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful +** use of the sqlite3_vtab_distinct() interface and the "orderByConsumed" +** flag might help queries against a virtual table to run faster. Being +** overly aggressive and setting the "orderByConsumed" flag when it is not +** valid to do so, on the other hand, might cause SQLite to return incorrect +** results. +*/ +SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*); + +/* +** CAPI3REF: Identify and handle IN constraints in xBestIndex +** +** This interface may only be used from within an +** [xBestIndex|xBestIndex() method] of a [virtual table] implementation. +** The result of invoking this interface from any other context is +** undefined and probably harmful. +** +** ^(A constraint on a virtual table of the form +** "[IN operator|column IN (...)]" is +** communicated to the xBestIndex method as a +** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use +** this constraint, it must set the corresponding +** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** the usual mode of handling IN operators, SQLite generates [bytecode] +** that invokes the [xFilter|xFilter() method] once for each value +** on the right-hand side of the IN operator.)^ Thus the virtual table +** only sees a single value from the right-hand side of the IN operator +** at a time. +** +** In some cases, however, it would be advantageous for the virtual +** table to see all values on the right-hand of the IN operator all at +** once. The sqlite3_vtab_in() interfaces facilitates this in two ways: +** +**
    +**
  1. +** ^A call to sqlite3_vtab_in(P,N,-1) will return true (non-zero) +** if and only if the [sqlite3_index_info|P->aConstraint][N] constraint +** is an [IN operator] that can be processed all at once. ^In other words, +** sqlite3_vtab_in() with -1 in the third argument is a mechanism +** by which the virtual table can ask SQLite if all-at-once processing +** of the IN operator is even possible. +** +**

  2. +** ^A call to sqlite3_vtab_in(P,N,F) with F==1 or F==0 indicates +** to SQLite that the virtual table does or does not want to process +** the IN operator all-at-once, respectively. ^Thus when the third +** parameter (F) is non-negative, this interface is the mechanism by +** which the virtual table tells SQLite how it wants to process the +** IN operator. +**

+** +** ^The sqlite3_vtab_in(P,N,F) interface can be invoked multiple times +** within the same xBestIndex method call. ^For any given P,N pair, +** the return value from sqlite3_vtab_in(P,N,F) will always be the same +** within the same xBestIndex call. ^If the interface returns true +** (non-zero), that means that the constraint is an IN operator +** that can be processed all-at-once. ^If the constraint is not an IN +** operator or cannot be processed all-at-once, then the interface returns +** false. +** +** ^(All-at-once processing of the IN operator is selected if both of the +** following conditions are met: +** +**
    +**
  1. The P->aConstraintUsage[N].argvIndex value is set to a positive +** integer. This is how the virtual table tells SQLite that it wants to +** use the N-th constraint. +** +**

  2. The last call to sqlite3_vtab_in(P,N,F) for which F was +** non-negative had F>=1. +**

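Putting the two conditions together, the xBestIndex side of all-at-once IN handling might look like this; i is the constraint index and nArg the running argv counter from the surrounding xBestIndex code:

    /* Condition 1: claim the constraint by giving it a positive argvIndex. */
    pInfo->aConstraintUsage[i].argvIndex = ++nArg;
    /* Condition 2: if all-at-once handling is possible, opt in to it. */
    if( sqlite3_vtab_in(pInfo, i, -1) ){
      sqlite3_vtab_in(pInfo, i, 1);
    }
    /* xFilter then receives an argv value suitable for
    ** sqlite3_vtab_in_first()/sqlite3_vtab_in_next(), as shown below. */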
)^ +** +** ^If either or both of the conditions above are false, then SQLite uses +** the traditional one-at-a-time processing strategy for the IN constraint. +** ^If both conditions are true, then the argvIndex-th parameter to the +** xFilter method will be an [sqlite3_value] that appears to be NULL, +** but which can be passed to [sqlite3_vtab_in_first()] and +** [sqlite3_vtab_in_next()] to find all values on the right-hand side +** of the IN constraint. +*/ +SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); + +/* +** CAPI3REF: Find all elements on the right-hand side of an IN constraint. +** +** These interfaces are only useful from within the +** [xFilter|xFilter() method] of a [virtual table] implementation. +** The result of invoking these interfaces from any other context +** is undefined and probably harmful. +** +** The X parameter in a call to sqlite3_vtab_in_first(X,P) or +** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** xFilter method which invokes these routines, and specifically +** a parameter that was previously selected for all-at-once IN constraint +** processing use the [sqlite3_vtab_in()] interface in the +** [xBestIndex|xBestIndex method]. ^(If the X parameter is not +** an xFilter argument that was selected for all-at-once IN constraint +** processing, then these routines return [SQLITE_MISUSE])^ or perhaps +** exhibit some other undefined or harmful behavior. +** +** ^(Use these routines to access all values on the right-hand side +** of the IN constraint using code like the following: +** +**
+**    for(rc=sqlite3_vtab_in_first(pList, &pVal);
+**        rc==SQLITE_OK && pVal;
+**        rc=sqlite3_vtab_in_next(pList, &pVal)
+**    ){
+**      // do something with pVal
+**    }
+**    if( rc!=SQLITE_OK ){
+**      // an error has occurred
+**    }
+** 
)^ +** +** ^On success, the sqlite3_vtab_in_first(X,P) and sqlite3_vtab_in_next(X,P) +** routines return SQLITE_OK and set *P to point to the first or next value +** on the RHS of the IN constraint. ^If there are no more values on the +** right hand side of the IN constraint, then *P is set to NULL and these +** routines return [SQLITE_DONE]. ^The return value might be +** some other value, such as SQLITE_NOMEM, in the event of a malfunction. +** +** The *ppOut values returned by these routines are only valid until the +** next call to either of these routines or until the end of the xFilter +** method from which these routines were called. If the virtual table +** implementation needs to retain the *ppOut values for longer, it must make +** copies. The *ppOut values are [protected sqlite3_value|protected]. +*/ +SQLITE_API int sqlite3_vtab_in_first(sqlite3_value *pVal, sqlite3_value **ppOut); +SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut); + +/* +** CAPI3REF: Constraint values in xBestIndex() +** METHOD: sqlite3_index_info +** +** This API may only be used from within the [xBestIndex|xBestIndex method] +** of a [virtual table] implementation. The result of calling this interface +** from outside of an xBestIndex method are undefined and probably harmful. +** +** ^When the sqlite3_vtab_rhs_value(P,J,V) interface is invoked from within +** the [xBestIndex] method of a [virtual table] implementation, with P being +** a copy of the [sqlite3_index_info] object pointer passed into xBestIndex and +** J being a 0-based index into P->aConstraint[], then this routine +** attempts to set *V to the value of the right-hand operand of +** that constraint if the right-hand operand is known. ^If the +** right-hand operand is not known, then *V is set to a NULL pointer. +** ^The sqlite3_vtab_rhs_value(P,J,V) interface returns SQLITE_OK if +** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V) +** inteface returns SQLITE_NOTFOUND if the right-hand side of the J-th +** constraint is not available. ^The sqlite3_vtab_rhs_value() interface +** can return an result code other than SQLITE_OK or SQLITE_NOTFOUND if +** something goes wrong. +** +** The sqlite3_vtab_rhs_value() interface is usually only successful if +** the right-hand operand of a constraint is a literal value in the original +** SQL statement. If the right-hand operand is an expression or a reference +** to some other column or a [host parameter], then sqlite3_vtab_rhs_value() +** will probably return [SQLITE_NOTFOUND]. +** +** ^(Some constraints, such as [SQLITE_INDEX_CONSTRAINT_ISNULL] and +** [SQLITE_INDEX_CONSTRAINT_ISNOTNULL], have no right-hand operand. For such +** constraints, sqlite3_vtab_rhs_value() always returns SQLITE_NOTFOUND.)^ +** +** ^The [sqlite3_value] object returned in *V is a protected sqlite3_value +** and remains valid for the duration of the xBestIndex method call. +** ^When xBestIndex returns, the sqlite3_value object returned by +** sqlite3_vtab_rhs_value() is automatically deallocated. +** +** The "_rhs_" in the name of this routine is an abbreviation for +** "Right-Hand Side". +*/ +SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **ppVal); + /* ** CAPI3REF: Conflict resolution modes ** KEYWORDS: {conflict resolution mode} @@ -10961,6 +10634,10 @@ SQLITE_API unsigned char *sqlite3_serialize( ** database is currently in a read transaction or is involved in a backup ** operation. 
** +** It is not possible to deserialized into the TEMP database. If the +** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the +** function returns SQLITE_ERROR. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning. @@ -13443,7 +13120,7 @@ struct fts5_api { ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -/* #include "config.h" */ +#include "config.h" #define SQLITECONFIG_H 1 #endif @@ -13679,11 +13356,12 @@ struct fts5_api { #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */ #endif -#if GCC_VERSION>=4007000 || \ - (__has_extension(c_atomic) && __has_extension(c_atomic_store_n)) +#if GCC_VERSION>=4007000 || __has_extension(c_atomic) +# define SQLITE_ATOMIC_INTRINSICS 1 # define AtomicLoad(PTR) __atomic_load_n((PTR),__ATOMIC_RELAXED) # define AtomicStore(PTR,VAL) __atomic_store_n((PTR),(VAL),__ATOMIC_RELAXED) #else +# define SQLITE_ATOMIC_INTRINSICS 0 # define AtomicLoad(PTR) (*(PTR)) # define AtomicStore(PTR,VAL) (*(PTR) = (VAL)) #endif @@ -13888,11 +13566,12 @@ struct fts5_api { ** is significant and used at least once. On switch statements ** where multiple cases go to the same block of code, testcase() ** can insure that all cases are evaluated. -** */ -#ifdef SQLITE_COVERAGE_TEST -SQLITE_PRIVATE void sqlite3Coverage(int); -# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) +# ifndef SQLITE_AMALGAMATION + extern unsigned int sqlite3CoverageCounter; +# endif +# define testcase(X) if( X ){ sqlite3CoverageCounter += (unsigned)__LINE__; } #else # define testcase(X) #endif @@ -13922,6 +13601,14 @@ SQLITE_PRIVATE void sqlite3Coverage(int); # define VVA_ONLY(X) #endif +/* +** Disable ALWAYS() and NEVER() (make them pass-throughs) for coverage +** and mutation testing +*/ +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) +# define SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS 1 +#endif + /* ** The ALWAYS and NEVER macros surround boolean expressions which ** are intended to always be true or false, respectively. Such @@ -13937,7 +13624,7 @@ SQLITE_PRIVATE void sqlite3Coverage(int); ** be true and false so that the unreachable code they specify will ** not be counted as untested code. */ -#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) +#if defined(SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS) # define ALWAYS(X) (1) # define NEVER(X) (0) #elif !defined(NDEBUG) @@ -13948,26 +13635,6 @@ SQLITE_PRIVATE void sqlite3Coverage(int); # define NEVER(X) (X) #endif -/* -** The harmless(X) macro indicates that expression X is usually false -** but can be true without causing any problems, but we don't know of -** any way to cause X to be true. -** -** In debugging and testing builds, this macro will abort if X is ever -** true. In this way, developers are alerted to a possible test case -** that causes X to be true. If a harmless macro ever fails, that is -** an opportunity to change the macro into a testcase() and add a new -** test case to the test suite. -** -** For normal production builds, harmless(X) is a no-op, since it does -** not matter whether expression X is true or false. -*/ -#ifdef SQLITE_DEBUG -# define harmless(X) assert(!(X)); -#else -# define harmless(X) -#endif - /* ** Some conditionals are optimizations only. 
In other words, if the ** conditionals are replaced with a constant 1 (true) or 0 (false) then @@ -14031,6 +13698,13 @@ SQLITE_PRIVATE void sqlite3Coverage(int); # undef SQLITE_ENABLE_EXPLAIN_COMMENTS #endif +/* +** SQLITE_OMIT_VIRTUALTABLE implies SQLITE_OMIT_ALTERTABLE +*/ +#if defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_ALTERTABLE) +# define SQLITE_OMIT_ALTERTABLE +#endif + /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. This macro is used inside of various testcase() @@ -14143,7 +13817,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); /* ** Number of entries in a hash table */ -/* #define sqliteHashCount(H) ((H)->count) // NOT USED */ +#define sqliteHashCount(H) ((H)->count) #endif /* SQLITE_HASH_H */ @@ -14175,8 +13849,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_LP 22 #define TK_RP 23 #define TK_AS 24 -#define TK_WITHOUT 25 -#define TK_COMMA 26 +#define TK_COMMA 25 +#define TK_WITHOUT 26 #define TK_ABORT 27 #define TK_ACTION 28 #define TK_AFTER 29 @@ -14262,78 +13936,79 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_SLASH 109 #define TK_REM 110 #define TK_CONCAT 111 -#define TK_COLLATE 112 -#define TK_BITNOT 113 -#define TK_ON 114 -#define TK_INDEXED 115 -#define TK_STRING 116 -#define TK_JOIN_KW 117 -#define TK_CONSTRAINT 118 -#define TK_DEFAULT 119 -#define TK_NULL 120 -#define TK_PRIMARY 121 -#define TK_UNIQUE 122 -#define TK_CHECK 123 -#define TK_REFERENCES 124 -#define TK_AUTOINCR 125 -#define TK_INSERT 126 -#define TK_DELETE 127 -#define TK_UPDATE 128 -#define TK_SET 129 -#define TK_DEFERRABLE 130 -#define TK_FOREIGN 131 -#define TK_DROP 132 -#define TK_UNION 133 -#define TK_ALL 134 -#define TK_EXCEPT 135 -#define TK_INTERSECT 136 -#define TK_SELECT 137 -#define TK_VALUES 138 -#define TK_DISTINCT 139 -#define TK_DOT 140 -#define TK_FROM 141 -#define TK_JOIN 142 -#define TK_USING 143 -#define TK_ORDER 144 -#define TK_GROUP 145 -#define TK_HAVING 146 -#define TK_LIMIT 147 -#define TK_WHERE 148 -#define TK_RETURNING 149 -#define TK_INTO 150 -#define TK_NOTHING 151 -#define TK_FLOAT 152 -#define TK_BLOB 153 -#define TK_INTEGER 154 -#define TK_VARIABLE 155 -#define TK_CASE 156 -#define TK_WHEN 157 -#define TK_THEN 158 -#define TK_ELSE 159 -#define TK_INDEX 160 -#define TK_ALTER 161 -#define TK_ADD 162 -#define TK_WINDOW 163 -#define TK_OVER 164 -#define TK_FILTER 165 -#define TK_COLUMN 166 -#define TK_AGG_FUNCTION 167 -#define TK_AGG_COLUMN 168 -#define TK_TRUEFALSE 169 -#define TK_ISNOT 170 -#define TK_FUNCTION 171 -#define TK_UMINUS 172 -#define TK_UPLUS 173 -#define TK_TRUTH 174 -#define TK_REGISTER 175 -#define TK_VECTOR 176 -#define TK_SELECT_COLUMN 177 -#define TK_IF_NULL_ROW 178 -#define TK_ASTERISK 179 -#define TK_SPAN 180 -#define TK_ERROR 181 -#define TK_SPACE 182 -#define TK_ILLEGAL 183 +#define TK_PTR 112 +#define TK_COLLATE 113 +#define TK_BITNOT 114 +#define TK_ON 115 +#define TK_INDEXED 116 +#define TK_STRING 117 +#define TK_JOIN_KW 118 +#define TK_CONSTRAINT 119 +#define TK_DEFAULT 120 +#define TK_NULL 121 +#define TK_PRIMARY 122 +#define TK_UNIQUE 123 +#define TK_CHECK 124 +#define TK_REFERENCES 125 +#define TK_AUTOINCR 126 +#define TK_INSERT 127 +#define TK_DELETE 128 +#define TK_UPDATE 129 +#define TK_SET 130 +#define TK_DEFERRABLE 131 +#define TK_FOREIGN 132 +#define TK_DROP 133 +#define TK_UNION 134 +#define TK_ALL 135 +#define TK_EXCEPT 136 +#define TK_INTERSECT 137 +#define TK_SELECT 138 +#define TK_VALUES 139 +#define TK_DISTINCT 140 +#define TK_DOT 141 +#define TK_FROM 
142 +#define TK_JOIN 143 +#define TK_USING 144 +#define TK_ORDER 145 +#define TK_GROUP 146 +#define TK_HAVING 147 +#define TK_LIMIT 148 +#define TK_WHERE 149 +#define TK_RETURNING 150 +#define TK_INTO 151 +#define TK_NOTHING 152 +#define TK_FLOAT 153 +#define TK_BLOB 154 +#define TK_INTEGER 155 +#define TK_VARIABLE 156 +#define TK_CASE 157 +#define TK_WHEN 158 +#define TK_THEN 159 +#define TK_ELSE 160 +#define TK_INDEX 161 +#define TK_ALTER 162 +#define TK_ADD 163 +#define TK_WINDOW 164 +#define TK_OVER 165 +#define TK_FILTER 166 +#define TK_COLUMN 167 +#define TK_AGG_FUNCTION 168 +#define TK_AGG_COLUMN 169 +#define TK_TRUEFALSE 170 +#define TK_ISNOT 171 +#define TK_FUNCTION 172 +#define TK_UMINUS 173 +#define TK_UPLUS 174 +#define TK_TRUTH 175 +#define TK_REGISTER 176 +#define TK_VECTOR 177 +#define TK_SELECT_COLUMN 178 +#define TK_IF_NULL_ROW 179 +#define TK_ASTERISK 180 +#define TK_SPAN 181 +#define TK_ERROR 182 +#define TK_SPACE 183 +#define TK_ILLEGAL 184 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14439,7 +14114,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); ** number of pages. A negative number N translations means that a buffer ** of -1024*N bytes is allocated and used for as many pages as it will hold. ** -** The default value of "20" was choosen to minimize the run-time of the +** The default value of "20" was chosen to minimize the run-time of the ** speedtest1 test program with options: --shrink-memory --reprepare */ #ifndef SQLITE_DEFAULT_PCACHE_INITSZ @@ -14601,6 +14276,7 @@ typedef INT16_TYPE LogEst; # define SQLITE_PTRSIZE __SIZEOF_POINTER__ # elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(_M_ARM) || defined(__arm__) || defined(__x86) || \ + (defined(__APPLE__) && defined(__POWERPC__)) || \ (defined(__TOS_AIX__) && !defined(__64BIT__)) # define SQLITE_PTRSIZE 4 # else @@ -14795,11 +14471,25 @@ struct BusyHandler { /* ** Name of table that holds the database schema. +** +** The PREFERRED names are used whereever possible. But LEGACY is also +** used for backwards compatibility. +** +** 1. Queries can use either the PREFERRED or the LEGACY names +** 2. The sqlite3_set_authorizer() callback uses the LEGACY name +** 3. The PRAGMA table_list statement uses the PREFERRED name +** +** The LEGACY names are stored in the internal symbol hash table +** in support of (2). Names are translated using sqlite3PreferredTableName() +** for (3). The sqlite3FindTable() function takes care of translating +** names for (1). +** +** Note that "sqlite_temp_schema" can also be called "temp.sqlite_schema". */ -#define DFLT_SCHEMA_TABLE "sqlite_master" -#define DFLT_TEMP_SCHEMA_TABLE "sqlite_temp_master" -#define ALT_SCHEMA_TABLE "sqlite_schema" -#define ALT_TEMP_SCHEMA_TABLE "sqlite_temp_schema" +#define LEGACY_SCHEMA_TABLE "sqlite_master" +#define LEGACY_TEMP_SCHEMA_TABLE "sqlite_temp_master" +#define PREFERRED_SCHEMA_TABLE "sqlite_schema" +#define PREFERRED_TEMP_SCHEMA_TABLE "sqlite_temp_schema" /* @@ -14811,7 +14501,7 @@ struct BusyHandler { ** The name of the schema table. The name is different for TEMP. 
*/ #define SCHEMA_TABLE(x) \ - ((!OMIT_TEMPDB)&&(x==1)?DFLT_TEMP_SCHEMA_TABLE:DFLT_SCHEMA_TABLE) + ((!OMIT_TEMPDB)&&(x==1)?LEGACY_TEMP_SCHEMA_TABLE:LEGACY_SCHEMA_TABLE) /* ** A convenience macro that returns the number of elements in @@ -14960,10 +14650,11 @@ typedef struct With With; /* ** A bit in a Bitmask */ -#define MASKBIT(n) (((Bitmask)1)<<(n)) -#define MASKBIT64(n) (((u64)1)<<(n)) -#define MASKBIT32(n) (((unsigned int)1)<<(n)) -#define ALLBITS ((Bitmask)-1) +#define MASKBIT(n) (((Bitmask)1)<<(n)) +#define MASKBIT64(n) (((u64)1)<<(n)) +#define MASKBIT32(n) (((unsigned int)1)<<(n)) +#define SMASKBIT32(n) ((n)<=31?((unsigned int)1)<<(n):0) +#define ALLBITS ((Bitmask)-1) /* A VList object records a mapping between parameters/variables/wildcards ** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer @@ -15352,7 +15043,7 @@ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *); #define BTREE_BLOBKEY 2 /* Table has keys only - no data */ SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*); -SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*); +SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, i64*); SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int); @@ -15476,13 +15167,17 @@ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor*, int, ...); #endif SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( +SQLITE_PRIVATE int sqlite3BtreeTableMoveto( BtCursor*, - UnpackedRecord *pUnKey, i64 intKey, int bias, int *pRes ); +SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( + BtCursor*, + UnpackedRecord *pUnKey, + int *pRes +); SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*); SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags); @@ -15833,35 +15528,35 @@ typedef struct VdbeOpList VdbeOpList; #define OP_If 18 /* jump */ #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ #define OP_IfNot 20 /* jump */ -#define OP_IfNullRow 21 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ -#define OP_SeekLT 22 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekLE 23 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGE 24 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGT 25 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IfNotOpen 26 /* jump, synopsis: if( !csr[P1] ) goto P2 */ -#define OP_IfNoHope 27 /* jump, synopsis: key=r[P3@P4] */ -#define OP_NoConflict 28 /* jump, synopsis: key=r[P3@P4] */ -#define OP_NotFound 29 /* jump, synopsis: key=r[P3@P4] */ -#define OP_Found 30 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekRowid 31 /* jump, synopsis: intkey=r[P3] */ -#define OP_NotExists 32 /* jump, synopsis: intkey=r[P3] */ -#define OP_Last 33 /* jump */ -#define OP_IfSmaller 34 /* jump */ -#define OP_SorterSort 35 /* jump */ -#define OP_Sort 36 /* jump */ -#define OP_Rewind 37 /* jump */ -#define OP_IdxLE 38 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxGT 39 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxLT 40 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxGE 41 /* jump, synopsis: key=r[P3@P4] */ -#define OP_RowSetRead 42 /* jump, synopsis: r[P3]=rowset(P1) */ +#define OP_IsNullOrType 21 /* jump, synopsis: if typeof(r[P1]) IN (P3,5) goto P2 */ +#define OP_IfNullRow 22 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ +#define OP_SeekLT 23 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekLE 24 /* jump, synopsis: key=r[P3@P4] */ +#define 
OP_SeekGE 25 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekGT 26 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IfNotOpen 27 /* jump, synopsis: if( !csr[P1] ) goto P2 */ +#define OP_IfNoHope 28 /* jump, synopsis: key=r[P3@P4] */ +#define OP_NoConflict 29 /* jump, synopsis: key=r[P3@P4] */ +#define OP_NotFound 30 /* jump, synopsis: key=r[P3@P4] */ +#define OP_Found 31 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekRowid 32 /* jump, synopsis: intkey=r[P3] */ +#define OP_NotExists 33 /* jump, synopsis: intkey=r[P3] */ +#define OP_Last 34 /* jump */ +#define OP_IfSmaller 35 /* jump */ +#define OP_SorterSort 36 /* jump */ +#define OP_Sort 37 /* jump */ +#define OP_Rewind 38 /* jump */ +#define OP_IdxLE 39 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxGT 40 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxLT 41 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxGE 42 /* jump, synopsis: key=r[P3@P4] */ #define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ #define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ -#define OP_RowSetTest 45 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 46 /* jump */ -#define OP_FkIfZero 47 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ -#define OP_IfPos 48 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ -#define OP_IfNotZero 49 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */ +#define OP_RowSetRead 45 /* jump, synopsis: r[P3]=rowset(P1) */ +#define OP_RowSetTest 46 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ +#define OP_Program 47 /* jump */ +#define OP_FkIfZero 48 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ +#define OP_IfPos 49 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ #define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ #define OP_Ne 52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */ @@ -15871,49 +15566,49 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Lt 56 /* jump, same as TK_LT, synopsis: IF r[P3]=r[P1] */ #define OP_ElseEq 58 /* jump, same as TK_ESCAPE */ -#define OP_DecrJumpZero 59 /* jump, synopsis: if (--r[P1])==0 goto P2 */ -#define OP_IncrVacuum 60 /* jump */ -#define OP_VNext 61 /* jump */ -#define OP_Init 62 /* jump, synopsis: Start at P2 */ -#define OP_PureFunc 63 /* synopsis: r[P3]=func(r[P2@NP]) */ -#define OP_Function 64 /* synopsis: r[P3]=func(r[P2@NP]) */ -#define OP_Return 65 -#define OP_EndCoroutine 66 -#define OP_HaltIfNull 67 /* synopsis: if r[P3]=null halt */ -#define OP_Halt 68 -#define OP_Integer 69 /* synopsis: r[P2]=P1 */ -#define OP_Int64 70 /* synopsis: r[P2]=P4 */ -#define OP_String 71 /* synopsis: r[P2]='P4' (len=P1) */ -#define OP_Null 72 /* synopsis: r[P2..P3]=NULL */ -#define OP_SoftNull 73 /* synopsis: r[P1]=NULL */ -#define OP_Blob 74 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 75 /* synopsis: r[P2]=parameter(P1,P4) */ -#define OP_Move 76 /* synopsis: r[P2@P3]=r[P1@P3] */ -#define OP_Copy 77 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ -#define OP_SCopy 78 /* synopsis: r[P2]=r[P1] */ -#define OP_IntCopy 79 /* synopsis: r[P2]=r[P1] */ -#define OP_ChngCntRow 80 /* synopsis: output=r[P1] */ -#define OP_ResultRow 81 /* synopsis: output=r[P1@P2] */ -#define OP_CollSeq 82 -#define OP_AddImm 83 /* synopsis: r[P1]=r[P1]+P2 */ -#define OP_RealAffinity 84 -#define OP_Cast 85 /* synopsis: affinity(r[P1]) */ -#define OP_Permutation 86 -#define OP_Compare 87 /* synopsis: r[P1@P3] <-> r[P2@P3] */ 
-#define OP_IsTrue 88 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ -#define OP_ZeroOrNull 89 /* synopsis: r[P2] = 0 OR NULL */ -#define OP_Offset 90 /* synopsis: r[P3] = sqlite_offset(P1) */ -#define OP_Column 91 /* synopsis: r[P3]=PX */ -#define OP_Affinity 92 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 93 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 94 /* synopsis: r[P2]=count() */ -#define OP_ReadCookie 95 -#define OP_SetCookie 96 -#define OP_ReopenIdx 97 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenRead 98 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenWrite 99 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenDup 100 -#define OP_OpenAutoindex 101 /* synopsis: nColumn=P2 */ +#define OP_IfNotZero 59 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */ +#define OP_DecrJumpZero 60 /* jump, synopsis: if (--r[P1])==0 goto P2 */ +#define OP_IncrVacuum 61 /* jump */ +#define OP_VNext 62 /* jump */ +#define OP_Filter 63 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */ +#define OP_Init 64 /* jump, synopsis: Start at P2 */ +#define OP_PureFunc 65 /* synopsis: r[P3]=func(r[P2@NP]) */ +#define OP_Function 66 /* synopsis: r[P3]=func(r[P2@NP]) */ +#define OP_Return 67 +#define OP_EndCoroutine 68 +#define OP_HaltIfNull 69 /* synopsis: if r[P3]=null halt */ +#define OP_Halt 70 +#define OP_Integer 71 /* synopsis: r[P2]=P1 */ +#define OP_Int64 72 /* synopsis: r[P2]=P4 */ +#define OP_String 73 /* synopsis: r[P2]='P4' (len=P1) */ +#define OP_Null 74 /* synopsis: r[P2..P3]=NULL */ +#define OP_SoftNull 75 /* synopsis: r[P1]=NULL */ +#define OP_Blob 76 /* synopsis: r[P2]=P4 (len=P1) */ +#define OP_Variable 77 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Move 78 /* synopsis: r[P2@P3]=r[P1@P3] */ +#define OP_Copy 79 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ +#define OP_SCopy 80 /* synopsis: r[P2]=r[P1] */ +#define OP_IntCopy 81 /* synopsis: r[P2]=r[P1] */ +#define OP_FkCheck 82 +#define OP_ResultRow 83 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 84 +#define OP_AddImm 85 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 86 +#define OP_Cast 87 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 88 +#define OP_Compare 89 /* synopsis: r[P1@P3] <-> r[P2@P3] */ +#define OP_IsTrue 90 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ +#define OP_ZeroOrNull 91 /* synopsis: r[P2] = 0 OR NULL */ +#define OP_Offset 92 /* synopsis: r[P3] = sqlite_offset(P1) */ +#define OP_Column 93 /* synopsis: r[P3]=PX */ +#define OP_TypeCheck 94 /* synopsis: typecheck(r[P1@P2]) */ +#define OP_Affinity 95 /* synopsis: affinity(r[P1@P2]) */ +#define OP_MakeRecord 96 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 97 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 98 +#define OP_SetCookie 99 +#define OP_ReopenIdx 100 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenRead 101 /* synopsis: root=P2 iDb=P3 */ #define OP_BitAnd 102 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ #define OP_BitOr 103 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ #define OP_ShiftLeft 104 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggInverse 157 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ -#define OP_AggStep 158 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep1 159 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggValue 160 /* synopsis: r[P3]=value N=P2 */ -#define OP_AggFinal 161 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 162 -#define OP_CursorLock 163 -#define OP_CursorUnlock 
164 -#define OP_TableLock 165 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 166 -#define OP_VCreate 167 -#define OP_VDestroy 168 -#define OP_VOpen 169 -#define OP_VColumn 170 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 171 -#define OP_Pagecount 172 -#define OP_MaxPgcnt 173 -#define OP_Trace 174 -#define OP_CursorHint 175 -#define OP_ReleaseReg 176 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 177 -#define OP_Explain 178 -#define OP_Abortable 179 +#define OP_OpenWrite 112 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenDup 113 +#define OP_BitNot 114 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */ +#define OP_OpenAutoindex 115 /* synopsis: nColumn=P2 */ +#define OP_OpenEphemeral 116 /* synopsis: nColumn=P2 */ +#define OP_String8 117 /* same as TK_STRING, synopsis: r[P2]='P4' */ +#define OP_SorterOpen 118 +#define OP_SequenceTest 119 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ +#define OP_OpenPseudo 120 /* synopsis: P3 columns in r[P2] */ +#define OP_Close 121 +#define OP_ColumnsUsed 122 +#define OP_SeekScan 123 /* synopsis: Scan-ahead up to P1 rows */ +#define OP_SeekHit 124 /* synopsis: set P2<=seekHit<=P3 */ +#define OP_Sequence 125 /* synopsis: r[P2]=cursor[P1].ctr++ */ +#define OP_NewRowid 126 /* synopsis: r[P2]=rowid */ +#define OP_Insert 127 /* synopsis: intkey=r[P3] data=r[P2] */ +#define OP_RowCell 128 +#define OP_Delete 129 +#define OP_ResetCount 130 +#define OP_SorterCompare 131 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ +#define OP_SorterData 132 /* synopsis: r[P2]=data */ +#define OP_RowData 133 /* synopsis: r[P2]=data */ +#define OP_Rowid 134 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 135 +#define OP_SeekEnd 136 +#define OP_IdxInsert 137 /* synopsis: key=r[P2] */ +#define OP_SorterInsert 138 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 139 /* synopsis: key=r[P2@P3] */ +#define OP_DeferredSeek 140 /* synopsis: Move P3 to P1.rowid if needed */ +#define OP_IdxRowid 141 /* synopsis: r[P2]=rowid */ +#define OP_FinishSeek 142 +#define OP_Destroy 143 +#define OP_Clear 144 +#define OP_ResetSorter 145 +#define OP_CreateBtree 146 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ +#define OP_SqlExec 147 +#define OP_ParseSchema 148 +#define OP_LoadAnalysis 149 +#define OP_DropTable 150 +#define OP_DropIndex 151 +#define OP_DropTrigger 152 +#define OP_Real 153 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ +#define OP_IntegrityCk 154 +#define OP_RowSetAdd 155 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 156 +#define OP_FkCounter 157 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 158 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 159 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggInverse 160 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ +#define OP_AggStep 161 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep1 162 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggValue 163 /* synopsis: r[P3]=value N=P2 */ +#define OP_AggFinal 164 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 165 +#define OP_CursorLock 166 +#define OP_CursorUnlock 167 +#define OP_TableLock 168 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 169 +#define OP_VCreate 170 +#define OP_VDestroy 171 +#define OP_VOpen 172 +#define OP_VInitIn 173 /* synopsis: r[P2]=ValueList(P1,P3) */ +#define OP_VColumn 174 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 175 +#define OP_Pagecount 176 +#define OP_MaxPgcnt 177 +#define OP_FilterAdd 178 /* synopsis: filter(P1) += key(P3@P4) */ +#define 
OP_Trace 179 +#define OP_CursorHint 180 +#define OP_ReleaseReg 181 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 182 +#define OP_Explain 183 +#define OP_Abortable 184 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -16006,27 +15706,28 @@ typedef struct VdbeOpList VdbeOpList; #define OPFLG_INITIALIZER {\ /* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x10,\ /* 8 */ 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x03,\ -/* 16 */ 0x01, 0x01, 0x03, 0x12, 0x03, 0x01, 0x09, 0x09,\ -/* 24 */ 0x09, 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09,\ -/* 32 */ 0x09, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\ -/* 40 */ 0x01, 0x01, 0x23, 0x26, 0x26, 0x0b, 0x01, 0x01,\ -/* 48 */ 0x03, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ -/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x01, 0x01, 0x01, 0x00,\ -/* 64 */ 0x00, 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10,\ -/* 72 */ 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10,\ -/* 80 */ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00,\ -/* 88 */ 0x12, 0x1e, 0x20, 0x00, 0x00, 0x00, 0x10, 0x10,\ -/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26,\ +/* 16 */ 0x01, 0x01, 0x03, 0x12, 0x03, 0x03, 0x01, 0x09,\ +/* 24 */ 0x09, 0x09, 0x09, 0x01, 0x09, 0x09, 0x09, 0x09,\ +/* 32 */ 0x09, 0x09, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\ +/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x23, 0x0b, 0x01,\ +/* 48 */ 0x01, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x01, 0x01, 0x01,\ +/* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ +/* 72 */ 0x10, 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00,\ +/* 80 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02,\ +/* 88 */ 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00, 0x00,\ +/* 96 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x26, 0x26,\ /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ -/* 112 */ 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 128 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x04, 0x04,\ -/* 136 */ 0x00, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x10,\ -/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,\ -/* 152 */ 0x10, 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00,\ +/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\ +/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\ +/* 136 */ 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00, 0x10,\ +/* 144 */ 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 152 */ 0x00, 0x10, 0x00, 0x06, 0x10, 0x00, 0x04, 0x1a,\ /* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ -/* 176 */ 0x00, 0x00, 0x00, 0x00,} +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,\ +/* 176 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 184 */ 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -16034,7 +15735,7 @@ typedef struct VdbeOpList VdbeOpList; ** generated this include file strives to group all JUMP opcodes ** together near the beginning of the list. 
*/ -#define SQLITE_MX_JUMP_OPCODE 62 /* Maximum JUMP opcode */ +#define SQLITE_MX_JUMP_OPCODE 64 /* Maximum JUMP opcode */ /************** End of opcodes.h *********************************************/ /************** Continuing where we left off in vdbe.h ***********************/ @@ -17095,6 +16796,7 @@ struct sqlite3 { u32 nSchemaLock; /* Do not reset the schema when non-zero */ unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */ int errCode; /* Most recent error code (SQLITE_*) */ + int errByteOffset; /* Byte offset of error in SQL statement */ int errMask; /* & result codes with this before returning */ int iSysErrno; /* Errno value from last system error */ u32 dbOptFlags; /* Flags to enable/disable optimizations */ @@ -17111,10 +16813,10 @@ struct sqlite3 { u8 mTrace; /* zero or more SQLITE_TRACE flags */ u8 noSharedCache; /* True if no shared-cache backends */ u8 nSqlExec; /* Number of pending OP_SqlExec opcodes */ + u8 eOpenState; /* Current condition of the connection */ int nextPagesize; /* Pagesize after VACUUM if >0 */ - u32 magic; /* Magic number for detect library misuse */ - int nChange; /* Value returned by sqlite3_changes() */ - int nTotalChange; /* Value returned by sqlite3_total_changes() */ + i64 nChange; /* Value returned by sqlite3_changes() */ + i64 nTotalChange; /* Value returned by sqlite3_total_changes() */ int aLimit[SQLITE_N_LIMIT]; /* Limits */ int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */ struct sqlite3InitInfo { /* Information used during initialization */ @@ -17124,7 +16826,7 @@ struct sqlite3 { unsigned orphanTrigger : 1; /* Last statement is orphaned TEMP trigger */ unsigned imposterTable : 1; /* Building an imposter table */ unsigned reopenMemdb : 1; /* ATTACH is really a reopen using MemDB */ - char **azInit; /* "type", "name", and "tbl_name" columns */ + const char **azInit; /* "type", "name", and "tbl_name" columns */ } init; int nVdbeActive; /* Number of VDBEs currently running */ int nVdbeRead; /* Number of active VDBEs that read or write */ @@ -17134,10 +16836,10 @@ struct sqlite3 { int nExtension; /* Number of loaded extensions */ void **aExtension; /* Array of shared library handles */ union { - void (*xLegacy)(void*,const char*); /* Legacy trace function */ - int (*xV2)(u32,void*,void*,void*); /* V2 Trace function */ + void (*xLegacy)(void*,const char*); /* mTrace==SQLITE_TRACE_LEGACY */ + int (*xV2)(u32,void*,void*,void*); /* All other mTrace values */ } trace; - void *pTraceArg; /* Argument to the trace function */ + void *pTraceArg; /* Argument to the trace function */ #ifndef SQLITE_OMIT_DEPRECATED void (*xProfile)(void*,const char*,u64); /* Profiling function */ void *pProfileArg; /* Argument to profile function */ @@ -17148,6 +16850,9 @@ struct sqlite3 { void (*xRollbackCallback)(void*); /* Invoked at every commit. */ void *pUpdateArg; void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); + void *pAutovacPagesArg; /* Client argument to autovac_pages */ + void (*xAutovacDestr)(void*); /* Destructor for pAutovacPAgesArg */ + unsigned int (*xAutovacPages)(void*,const char*,u32,u32,u32); Parse *pParse; /* Current parse */ #ifdef SQLITE_ENABLE_PREUPDATE_HOOK void *pPreUpdateArg; /* First argument to xPreUpdateCallback */ @@ -17277,6 +16982,7 @@ struct sqlite3 { #define SQLITE_CountRows HI(0x00001) /* Count rows changed by INSERT, */ /* DELETE, or UPDATE and return */ /* the count using a callback. 
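*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  The hunk above widens sqlite3.nChange/nTotalChange to i64
** and adds errByteOffset.  At the public API these presumably surface as
** sqlite3_changes64()/sqlite3_total_changes64() and sqlite3_error_offset()
** (assuming a library of roughly this vintage, 3.37+/3.38+).
** ------------------------------------------------------------------------
*/
#include <stdio.h>
#include <sqlite3.h>

/* Report how many rows the last statement changed and where in the SQL
** text the most recent error (if any) occurred, using the 64-bit and
** offset-aware entry points that correspond to the fields added above. */
static void report_status(sqlite3 *db){
  printf("rows changed (last stmt): %lld\n",
         (long long)sqlite3_changes64(db));
  printf("rows changed (lifetime):  %lld\n",
         (long long)sqlite3_total_changes64(db));
  printf("error byte offset:        %d\n",   /* -1 when not applicable */
         sqlite3_error_offset(db));
}
/* [End of editor's sketch.] */

/*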
*/ +#define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG @@ -17323,6 +17029,11 @@ struct sqlite3 { #define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ #define SQLITE_MinMaxOpt 0x00010000 /* The min/max optimization */ #define SQLITE_SeekScan 0x00020000 /* The OP_SeekScan optimization */ +#define SQLITE_OmitOrderBy 0x00040000 /* Omit pointless ORDER BY */ + /* TH3 expects this value ^^^^^^^^^^ to be 0x40000. Coordinate any change */ +#define SQLITE_BloomFilter 0x00080000 /* Use a Bloom filter on searches */ +#define SQLITE_BloomPulldown 0x00100000 /* Run Bloom filters early */ +#define SQLITE_BalancedMerge 0x00200000 /* Balance multi-way merges */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* @@ -17337,17 +17048,16 @@ struct sqlite3 { */ #define ConstFactorOk(P) ((P)->okConstFactor) -/* -** Possible values for the sqlite.magic field. -** The numbers are obtained at random and have no special meaning, other -** than being distinct from one another. +/* Possible values for the sqlite3.eOpenState field. +** The numbers are randomly selected such that a minimum of three bits must +** change to convert any number to another or to zero */ -#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */ -#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */ -#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */ -#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */ -#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */ -#define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */ +#define SQLITE_STATE_OPEN 0x76 /* Database is open */ +#define SQLITE_STATE_CLOSED 0xce /* Database is closed */ +#define SQLITE_STATE_SICK 0xba /* Error and awaiting close */ +#define SQLITE_STATE_BUSY 0x6d /* Database currently in use */ +#define SQLITE_STATE_ERROR 0xd5 /* An SQLITE_MISUSE error occurred */ +#define SQLITE_STATE_ZOMBIE 0xa7 /* Close with last statement close */ /* ** Each SQL function is defined by an instance of the following @@ -17372,7 +17082,7 @@ struct FuncDef { union { FuncDef *pHash; /* Next with a different name but the same hash */ FuncDestructor *pDestructor; /* Reference counted destructor function */ - } u; + } u; /* pHash if SQLITE_FUNC_BUILTIN, pDestructor otherwise */ }; /* @@ -17402,12 +17112,13 @@ struct FuncDestructor { ** are assert() statements in the code to verify this. 
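*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  The new SQLITE_STATE_* values above are documented as
** chosen so that any two of them (or zero) differ in at least three bits.
** A standalone check of that property, using the constants as they appear
** in this hunk:
** ------------------------------------------------------------------------
*/
#include <assert.h>
#include <stdio.h>

static const unsigned char aOpenState[] = {
  0x76, 0xce, 0xba, 0x6d, 0xd5, 0xa7, 0x00   /* OPEN..ZOMBIE, plus zero */
};

static int bitcount(unsigned v){ int n = 0; while(v){ n += v&1; v >>= 1; } return n; }

int main(void){
  for(unsigned i = 0; i < sizeof(aOpenState); i++){
    for(unsigned j = i+1; j < sizeof(aOpenState); j++){
      assert( bitcount(aOpenState[i]^aOpenState[j]) >= 3 );
    }
  }
  printf("all state values differ pairwise in >=3 bits\n");
  return 0;
}
/* [End of editor's sketch.] */

/*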
** ** Value constraints (enforced via assert()): -** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg -** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG -** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG -** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API -** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API -** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS +** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg +** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd +** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG +** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG +** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API +** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API +** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS ** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API */ #define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ @@ -17432,6 +17143,8 @@ struct FuncDestructor { #define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ +#define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +#define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */ #define INLINEFUNC_coalesce 0 @@ -17494,7 +17207,7 @@ struct FuncDestructor { ** are interpreted in the same way as the first 4 parameters to ** FUNCTION(). ** -** WFUNCTION(zName, nArg, iArg, xStep, xFinal, xValue, xInverse) +** WAGGREGATE(zName, nArg, iArg, xStep, xFinal, xValue, xInverse) ** Used to create an aggregate function definition implemented by ** the C functions xStep and xFinal. The first four parameters ** are interpreted in the same way as the first 4 parameters to @@ -17509,44 +17222,55 @@ struct FuncDestructor { ** parameter. 
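*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  The constraint lines above keep the internal FuncDef bits
** numerically aligned with the public flags accepted by the create-function
** API (e.g. SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC).  A minimal
** sketch of registering an application-defined scalar function with those
** public flags; "half" and halfFunc are hypothetical names:
** ------------------------------------------------------------------------
*/
#include <sqlite3.h>

/* A trivial scalar function: half(X) -> X/2.0 */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
}

/* SQLITE_DETERMINISTIC (same inputs give the same output) and
** SQLITE_INNOCUOUS (no side effects) are OR-ed into the text-encoding
** argument of sqlite3_create_function_v2(). */
static int register_half(sqlite3 *db){
  return sqlite3_create_function_v2(db, "half", 1,
      SQLITE_UTF8 | SQLITE_DETERMINISTIC | SQLITE_INNOCUOUS,
      0 /* pApp */, halfFunc, 0, 0, 0);
}
/* [End of editor's sketch.] */

/*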
*/ #define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define SFUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define MFUNCTION(zName, nArg, xPtr, xFunc) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } +#define JFUNCTION(zName, nArg, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS|\ + SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ + SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ - {nArg, SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} } #define TEST_FUNC(zName, nArg, iArg, mFlags) \ - {nArg, SQLITE_UTF8|SQLITE_FUNC_INTERNAL|SQLITE_FUNC_TEST| \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_UTF8|SQLITE_FUNC_INTERNAL|SQLITE_FUNC_TEST| \ SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} } #define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8, \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_SLOCHNG|SQLITE_UTF8, \ 0, 0, xFunc, 0, 0, 0, #zName, {0} } #define PURE_DATE(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \ (void*)&sqlite3Config, 0, xFunc, 0, 0, 0, #zName, {0} } #define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \ - {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ pArg, 0, xFunc, 0, 0, 0, #zName, } #define LIKEFUNC(zName, nArg, arg, flags) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \ (void *)arg, 0, likeFunc, 0, 0, 0, #zName, {0} } #define WAGGREGATE(zName, nArg, arg, nc, xStep, xFinal, xValue, xInverse, f) \ - {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|f, \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|f, \ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,xValue,xInverse,#zName, {0}} #define INTERNAL_FUNCTION(zName, nArg, xFunc) \ - {nArg, SQLITE_FUNC_INTERNAL|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \ + {nArg, SQLITE_FUNC_BUILTIN|\ + SQLITE_FUNC_INTERNAL|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \ 0, 0, xFunc, 0, 0, 0, #zName, {0} } @@ -17602,18 +17326,42 @@ struct Module { ** or 
equal to the table column index. It is ** equal if and only if there are no VIRTUAL ** columns to the left. +** +** Notes on zCnName: +** The zCnName field stores the name of the column, the datatype of the +** column, and the collating sequence for the column, in that order, all in +** a single allocation. Each string is 0x00 terminated. The datatype +** is only included if the COLFLAG_HASTYPE bit of colFlags is set and the +** collating sequence name is only included if the COLFLAG_HASCOLL bit is +** set. */ struct Column { - char *zName; /* Name of this column, \000, then the type */ - Expr *pDflt; /* Default value or GENERATED ALWAYS AS value */ - char *zColl; /* Collating sequence. If NULL, use the default */ - u8 notNull; /* An OE_ code for handling a NOT NULL constraint */ - char affinity; /* One of the SQLITE_AFF_... values */ - u8 szEst; /* Estimated size of value in this column. sizeof(INT)==1 */ - u8 hName; /* Column name hash for faster lookup */ - u16 colFlags; /* Boolean properties. See COLFLAG_ defines below */ + char *zCnName; /* Name of this column */ + unsigned notNull :4; /* An OE_ code for handling a NOT NULL constraint */ + unsigned eCType :4; /* One of the standard types */ + char affinity; /* One of the SQLITE_AFF_... values */ + u8 szEst; /* Est size of value in this column. sizeof(INT)==1 */ + u8 hName; /* Column name hash for faster lookup */ + u16 iDflt; /* 1-based index of DEFAULT. 0 means "none" */ + u16 colFlags; /* Boolean properties. See COLFLAG_ defines below */ }; +/* Allowed values for Column.eCType. +** +** Values must match entries in the global constant arrays +** sqlite3StdTypeLen[] and sqlite3StdType[]. Each value is one more +** than the offset into these arrays for the corresponding name. +** Adjust the SQLITE_N_STDTYPE value if adding or removing entries. +*/ +#define COLTYPE_CUSTOM 0 /* Type appended to zName */ +#define COLTYPE_ANY 1 +#define COLTYPE_BLOB 2 +#define COLTYPE_INT 3 +#define COLTYPE_INTEGER 4 +#define COLTYPE_REAL 5 +#define COLTYPE_TEXT 6 +#define SQLITE_N_STDTYPE 6 /* Number of standard types */ + /* Allowed values for Column.colFlags. ** ** Constraints: @@ -17630,6 +17378,7 @@ struct Column { #define COLFLAG_STORED 0x0040 /* GENERATED ALWAYS AS ... STORED */ #define COLFLAG_NOTAVAIL 0x0080 /* STORED column not yet calculated */ #define COLFLAG_BUSY 0x0100 /* Blocks recursion on GENERATED columns */ +#define COLFLAG_HASCOLL 0x0200 /* Has collating sequence name in zCnName */ #define COLFLAG_GENERATED 0x0060 /* Combo: _STORED, _VIRTUAL */ #define COLFLAG_NOINSERT 0x0062 /* Combo: _HIDDEN, _STORED, _VIRTUAL */ @@ -17759,15 +17508,13 @@ struct VTable { #define SQLITE_VTABRISK_High 2 /* -** The schema for each SQL table and view is represented in memory -** by an instance of the following structure. +** The schema for each SQL table, virtual table, and view is represented +** in memory by an instance of the following structure. */ struct Table { char *zName; /* Name of the table or view */ Column *aCol; /* Information about each column */ Index *pIndex; /* List of SQL indexes on this table. */ - Select *pSelect; /* NULL for tables. Points to definition if a view. */ - FKey *pFKey; /* Linked list of all foreign keys in this table */ char *zColAff; /* String defining the affinity of each column */ ExprList *pCheck; /* All CHECK constraints */ /* ... 
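*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  Per the new "Notes on zCnName" above, the column name,
** declared type, and collation name now share a single allocation as
** consecutive NUL-terminated strings, gated by the COLFLAG_HASTYPE and
** COLFLAG_HASCOLL bits.  A standalone sketch of that layout; packed_str
** is a hypothetical helper, not the internal accessor:
** ------------------------------------------------------------------------
*/
#include <string.h>

/* Return the iStr-th NUL-terminated string in a packed buffer such as
** "price\0REAL\0NOCASE\0": iStr==0 is the column name, 1 the declared
** type (if present), 2 the collation name (if present).
** Example: packed_str("price\0REAL\0NOCASE", 1) yields "REAL". */
static const char *packed_str(const char *z, int iStr){
  while( iStr-- > 0 ) z += strlen(z) + 1;   /* hop over earlier strings */
  return z;
}
/* [End of editor's sketch.] */

/*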
also used as column name list in a VIEW */ @@ -17783,15 +17530,24 @@ struct Table { LogEst costMult; /* Cost multiplier for using this table */ #endif u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ -#ifndef SQLITE_OMIT_ALTERTABLE - int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - int nModuleArg; /* Number of arguments to the module */ - char **azModuleArg; /* 0: module 1: schema 2: vtab name 3...: args */ - VTable *pVTable; /* List of VTable objects. */ -#endif - Trigger *pTrigger; /* List of triggers stored in pSchema */ + u8 eTabType; /* 0: normal, 1: virtual, 2: view */ + union { + struct { /* Used by ordinary tables: */ + int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ + FKey *pFKey; /* Linked list of all foreign keys in this table */ + ExprList *pDfltList; /* DEFAULT clauses on various columns. + ** Or the AS clause for generated columns. */ + } tab; + struct { /* Used by views: */ + Select *pSelect; /* View definition */ + } view; + struct { /* Used by virtual tables only: */ + int nArg; /* Number of arguments to the module */ + char **azArg; /* 0: module 1: schema 2: vtab name 3...: args */ + VTable *p; /* List of VTable objects. */ + } vtab; + } u; + Trigger *pTrigger; /* List of triggers on this object */ Schema *pSchema; /* Schema that contains this table */ }; @@ -17810,24 +17566,35 @@ struct Table { ** TF_HasStored == COLFLAG_STORED ** TF_HasHidden == COLFLAG_HIDDEN */ -#define TF_Readonly 0x0001 /* Read-only system table */ -#define TF_HasHidden 0x0002 /* Has one or more hidden columns */ -#define TF_HasPrimaryKey 0x0004 /* Table has a primary key */ -#define TF_Autoincrement 0x0008 /* Integer primary key is autoincrement */ -#define TF_HasStat1 0x0010 /* nRowLogEst set from sqlite_stat1 */ -#define TF_HasVirtual 0x0020 /* Has one or more VIRTUAL columns */ -#define TF_HasStored 0x0040 /* Has one or more STORED columns */ -#define TF_HasGenerated 0x0060 /* Combo: HasVirtual + HasStored */ -#define TF_WithoutRowid 0x0080 /* No rowid. PRIMARY KEY is the key */ -#define TF_StatsUsed 0x0100 /* Query planner decisions affected by +#define TF_Readonly 0x00000001 /* Read-only system table */ +#define TF_HasHidden 0x00000002 /* Has one or more hidden columns */ +#define TF_HasPrimaryKey 0x00000004 /* Table has a primary key */ +#define TF_Autoincrement 0x00000008 /* Integer primary key is autoincrement */ +#define TF_HasStat1 0x00000010 /* nRowLogEst set from sqlite_stat1 */ +#define TF_HasVirtual 0x00000020 /* Has one or more VIRTUAL columns */ +#define TF_HasStored 0x00000040 /* Has one or more STORED columns */ +#define TF_HasGenerated 0x00000060 /* Combo: HasVirtual + HasStored */ +#define TF_WithoutRowid 0x00000080 /* No rowid. 
PRIMARY KEY is the key */ +#define TF_StatsUsed 0x00000100 /* Query planner decisions affected by ** Index.aiRowLogEst[] values */ -#define TF_NoVisibleRowid 0x0200 /* No user-visible "rowid" column */ -#define TF_OOOHidden 0x0400 /* Out-of-Order hidden columns */ -#define TF_HasNotNull 0x0800 /* Contains NOT NULL constraints */ -#define TF_Shadow 0x1000 /* True for a shadow table */ -#define TF_HasStat4 0x2000 /* STAT4 info available for this table */ -#define TF_Ephemeral 0x4000 /* An ephemeral table */ -#define TF_Eponymous 0x8000 /* An eponymous virtual table */ +#define TF_NoVisibleRowid 0x00000200 /* No user-visible "rowid" column */ +#define TF_OOOHidden 0x00000400 /* Out-of-Order hidden columns */ +#define TF_HasNotNull 0x00000800 /* Contains NOT NULL constraints */ +#define TF_Shadow 0x00001000 /* True for a shadow table */ +#define TF_HasStat4 0x00002000 /* STAT4 info available for this table */ +#define TF_Ephemeral 0x00004000 /* An ephemeral table */ +#define TF_Eponymous 0x00008000 /* An eponymous virtual table */ +#define TF_Strict 0x00010000 /* STRICT mode */ + +/* +** Allowed values for Table.eTabType +*/ +#define TABTYP_NORM 0 /* Ordinary table */ +#define TABTYP_VTAB 1 /* Virtual table */ +#define TABTYP_VIEW 2 /* A view */ + +#define IsView(X) ((X)->eTabType==TABTYP_VIEW) +#define IsOrdinaryTable(X) ((X)->eTabType==TABTYP_NORM) /* ** Test to see whether or not a table is a virtual table. This is @@ -17835,9 +17602,9 @@ struct Table { ** table support is omitted from the build. */ #ifndef SQLITE_OMIT_VIRTUALTABLE -# define IsVirtual(X) ((X)->nModuleArg) +# define IsVirtual(X) ((X)->eTabType==TABTYP_VTAB) # define ExprIsVtab(X) \ - ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->nModuleArg) + ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->eTabType==TABTYP_VTAB) #else # define IsVirtual(X) 0 # define ExprIsVtab(X) 0 @@ -18226,10 +17993,10 @@ typedef int ynVar; ** tree. ** ** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB, -** or TK_STRING), then Expr.token contains the text of the SQL literal. If -** the expression is a variable (TK_VARIABLE), then Expr.token contains the +** or TK_STRING), then Expr.u.zToken contains the text of the SQL literal. If +** the expression is a variable (TK_VARIABLE), then Expr.u.zToken contains the ** variable name. Finally, if the expression is an SQL function (TK_FUNCTION), -** then Expr.token contains the name of the function. +** then Expr.u.zToken contains the name of the function. ** ** Expr.pRight and Expr.pLeft are the left and right subexpressions of a ** binary operator. Either or both may be NULL. @@ -18269,7 +18036,7 @@ typedef int ynVar; ** help reduce memory requirements, sometimes an Expr object will be ** truncated. And to reduce the number of memory allocations, sometimes ** two or more Expr objects will be stored in a single memory allocation, -** together with Expr.zToken strings. +** together with Expr.u.zToken strings. ** ** If the EP_Reduced and EP_TokenOnly flags are set when ** an Expr object is truncated. When EP_Reduced is set, then all @@ -18325,7 +18092,10 @@ struct Expr { ** TK_VARIABLE: variable number (always >= 1). 
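*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  The Table refactor above replaces the separate pSelect,
** pFKey, and azModuleArg fields with an eTabType tag plus a union, read
** through IsView()/IsOrdinaryTable()/IsVirtual().  The same tagged-union
** pattern in miniature; Thing and its members are hypothetical names,
** not the real struct:
** ------------------------------------------------------------------------
*/
typedef struct Thing Thing;
struct Thing {
  int eType;                               /* 0: normal, 1: virtual, 2: view */
  union {
    struct { int addColOffset; } tab;      /* valid only when eType==0 */
    struct { const char *zDef; } view;     /* valid only when eType==2 */
    struct { int nArg; } vtab;             /* valid only when eType==1 */
  } u;
};
#define ThingIsView(X)  ((X)->eType==2)

/* Always check the tag before touching a union member. */
static const char *thing_view_def(const Thing *p){
  return ThingIsView(p) ? p->u.view.zDef : 0;
}
/* [End of editor's sketch.] */

/*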
** TK_SELECT_COLUMN: column of the result vector */ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ - int iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + union { + int iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + int iOfst; /* else: start of token from start of statement */ + } w; AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ union { Table *pTab; /* TK_COLUMN: Table containing column. Can be NULL @@ -18338,8 +18108,7 @@ struct Expr { } y; }; -/* -** The following are the meanings of bits in the Expr.flags field. +/* The following are the meanings of bits in the Expr.flags field. ** Value restrictions: ** ** EP_Agg == NC_HasAgg == SF_HasAgg @@ -18378,14 +18147,12 @@ struct Expr { #define EP_FromDDL 0x40000000 /* Originates from sqlite_schema */ /* 0x80000000 // Available */ -/* -** The EP_Propagate mask is a set of properties that automatically propagate +/* The EP_Propagate mask is a set of properties that automatically propagate ** upwards into parent nodes. */ #define EP_Propagate (EP_Collate|EP_Subquery|EP_HasFunc) -/* -** These macros can be used to test, set, or clear bits in the +/* Macros can be used to test, set, or clear bits in the ** Expr.flags field. */ #define ExprHasProperty(E,P) (((E)->flags&(P))!=0) @@ -18395,6 +18162,16 @@ struct Expr { #define ExprAlwaysTrue(E) (((E)->flags&(EP_FromJoin|EP_IsTrue))==EP_IsTrue) #define ExprAlwaysFalse(E) (((E)->flags&(EP_FromJoin|EP_IsFalse))==EP_IsFalse) +/* Macros used to ensure that the correct members of unions are accessed +** in Expr. +*/ +#define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0) +#define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0) +#define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0) +#define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0) +#define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0) +#define ExprUseYWin(E) (((E)->flags&EP_WinFunc)!=0) +#define ExprUseYSub(E) (((E)->flags&EP_Subrtn)!=0) /* Flags for use with Expr.vvaFlags */ @@ -18477,11 +18254,12 @@ struct ExprList { unsigned bSorterRef :1; /* Defer evaluation until after sorting */ unsigned bNulls: 1; /* True if explicit "NULLS FIRST/LAST" */ union { - struct { + struct { /* Used by any ExprList other than Parse.pConsExpr */ u16 iOrderByCol; /* For ORDER BY, column number in result set */ u16 iAlias; /* Index into Parse.aAlias[] for zName */ } x; - int iConstExprReg; /* Register in which Expr value is cached */ + int iConstExprReg; /* Register in which Expr value is cached. Used only + ** by Parse.pConstExpr */ } u; } a[1]; /* One slot for each expression in the list */ }; @@ -18519,6 +18297,13 @@ struct IdList { /* ** The SrcItem object represents a single term in the FROM clause of a query. ** The SrcList object is mostly an array of SrcItems. +** +** Union member validity: +** +** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc +** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy +** u2.pIBIndex fg.isIndexedBy && !fg.isCte +** u2.pCteUse fg.isCte && !fg.isIndexedBy */ struct SrcItem { Schema *pSchema; /* Schema to which this item is fixed */ @@ -18667,31 +18452,33 @@ struct NameContext { ** Allowed values for the NameContext, ncFlags field. 
** ** Value constraints (all checked via assert()): -** NC_HasAgg == SF_HasAgg == EP_Agg -** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX +** NC_HasAgg == SF_HasAgg == EP_Agg +** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX +** NC_OrderAgg == SF_OrderByReqd == SQLITE_FUNC_ANYORDER ** NC_HasWin == EP_Win ** */ -#define NC_AllowAgg 0x00001 /* Aggregate functions are allowed here */ -#define NC_PartIdx 0x00002 /* True if resolving a partial index WHERE */ -#define NC_IsCheck 0x00004 /* True if resolving a CHECK constraint */ -#define NC_GenCol 0x00008 /* True for a GENERATED ALWAYS AS clause */ -#define NC_HasAgg 0x00010 /* One or more aggregate functions seen */ -#define NC_IdxExpr 0x00020 /* True if resolving columns of CREATE INDEX */ -#define NC_SelfRef 0x0002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */ -#define NC_VarSelect 0x00040 /* A correlated subquery has been seen */ -#define NC_UEList 0x00080 /* True if uNC.pEList is used */ -#define NC_UAggInfo 0x00100 /* True if uNC.pAggInfo is used */ -#define NC_UUpsert 0x00200 /* True if uNC.pUpsert is used */ -#define NC_UBaseReg 0x00400 /* True if uNC.iBaseReg is used */ -#define NC_MinMaxAgg 0x01000 /* min/max aggregates seen. See note above */ -#define NC_Complex 0x02000 /* True if a function or subquery seen */ -#define NC_AllowWin 0x04000 /* Window functions are allowed here */ -#define NC_HasWin 0x08000 /* One or more window functions seen */ -#define NC_IsDDL 0x10000 /* Resolving names in a CREATE statement */ -#define NC_InAggFunc 0x20000 /* True if analyzing arguments to an agg func */ -#define NC_FromDDL 0x40000 /* SQL text comes from sqlite_schema */ -#define NC_NoSelect 0x80000 /* Do not descend into sub-selects */ +#define NC_AllowAgg 0x000001 /* Aggregate functions are allowed here */ +#define NC_PartIdx 0x000002 /* True if resolving a partial index WHERE */ +#define NC_IsCheck 0x000004 /* True if resolving a CHECK constraint */ +#define NC_GenCol 0x000008 /* True for a GENERATED ALWAYS AS clause */ +#define NC_HasAgg 0x000010 /* One or more aggregate functions seen */ +#define NC_IdxExpr 0x000020 /* True if resolving columns of CREATE INDEX */ +#define NC_SelfRef 0x00002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */ +#define NC_VarSelect 0x000040 /* A correlated subquery has been seen */ +#define NC_UEList 0x000080 /* True if uNC.pEList is used */ +#define NC_UAggInfo 0x000100 /* True if uNC.pAggInfo is used */ +#define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */ +#define NC_UBaseReg 0x000400 /* True if uNC.iBaseReg is used */ +#define NC_MinMaxAgg 0x001000 /* min/max aggregates seen. See note above */ +#define NC_Complex 0x002000 /* True if a function or subquery seen */ +#define NC_AllowWin 0x004000 /* Window functions are allowed here */ +#define NC_HasWin 0x008000 /* One or more window functions seen */ +#define NC_IsDDL 0x010000 /* Resolving names in a CREATE statement */ +#define NC_InAggFunc 0x020000 /* True if analyzing arguments to an agg func */ +#define NC_FromDDL 0x040000 /* SQL text comes from sqlite_schema */ +#define NC_NoSelect 0x080000 /* Do not descend into sub-selects */ +#define NC_OrderAgg 0x8000000 /* Has an aggregate other than count/min/max */ /* ** An instance of the following object describes a single ON CONFLICT @@ -18774,9 +18561,10 @@ struct Select { ** "Select Flag". 
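*/

/* ------------------------------------------------------------------------
** [Editor's note] Illustrative sketch only; not part of the upstream
** SQLite diff.  Several hunks here keep parallel flag namespaces (NC_*,
** SF_*, EP_*, SQLITE_FUNC_*) numerically aligned, most recently adding
** NC_OrderAgg == SF_OrderByReqd == SQLITE_FUNC_ANYORDER, so one set can be
** copied into another with a mask instead of a translation table.  A
** standalone illustration of the idea, with hypothetical flag names:
** ------------------------------------------------------------------------
*/
#include <assert.h>

#define A_HASAGG  0x0010   /* flag in namespace A */
#define B_HASAGG  0x0010   /* must stay numerically equal to A_HASAGG */

static unsigned propagate(unsigned aFlags){
  /* The cheap cross-namespace copy only works while the values line up,
  ** which is exactly what the assert()-style constraints document. */
  assert( A_HASAGG==B_HASAGG );
  return aFlags & A_HASAGG;   /* result is reinterpreted as a B_* flag */
}
/* [End of editor's sketch.] */

/*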
** ** Value constraints (all checked via assert()) -** SF_HasAgg == NC_HasAgg -** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX -** SF_FixedLimit == WHERE_USE_LIMIT +** SF_HasAgg == NC_HasAgg +** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX +** SF_OrderByReqd == NC_OrderAgg == SQLITE_FUNC_ANYORDER +** SF_FixedLimit == WHERE_USE_LIMIT */ #define SF_Distinct 0x0000001 /* Output should be DISTINCT */ #define SF_All 0x0000002 /* Includes the ALL keyword */ @@ -18801,10 +18589,11 @@ struct Select { #define SF_WinRewrite 0x0100000 /* Window function rewrite accomplished */ #define SF_View 0x0200000 /* SELECT statement is a view */ #define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */ -#define SF_UpdateFrom 0x0800000 /* Statement is an UPDATE...FROM */ +#define SF_UFSrcCheck 0x0800000 /* Check pSrc as required by UPDATE...FROM */ #define SF_PushDown 0x1000000 /* SELECT has be modified by push-down opt */ #define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ +#define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ /* ** The results of a SELECT can be distributed in several ways, as defined @@ -19046,7 +18835,8 @@ struct Parse { AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ Parse *pToplevel; /* Parse structure for main program (or NULL) */ Table *pTriggerTab; /* Table triggers are being coded for */ - Parse *pParentParse; /* Parent parser if this parser is nested */ + TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ + ParseCleanup *pCleanup; /* List of cleanup operations to run after parse */ union { int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ Returning *pReturning; /* The RETURNING clause */ @@ -19067,6 +18857,7 @@ struct Parse { **************************************************************************/ int aTempReg[8]; /* Holding area for temporary registers */ + Parse *pOuterParse; /* Outer Parse object when nested */ Token sNameToken; /* Token with unqualified schema object name */ /************************************************************************ @@ -19101,14 +18892,14 @@ struct Parse { Token sArg; /* Complete text of a module argument */ Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif - TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ With *pWith; /* Current WITH clause, or NULL */ - ParseCleanup *pCleanup; /* List of cleanup operations to run after parse */ #ifndef SQLITE_OMIT_ALTERTABLE RenameToken *pRename; /* Tokens subject to renaming by ALTER TABLE */ #endif }; +/* Allowed values for Parse.eParseMode +*/ #define PARSE_MODE_NORMAL 0 #define PARSE_MODE_DECLARE_VTAB 1 #define PARSE_MODE_RENAME 2 @@ -19117,7 +18908,8 @@ struct Parse { /* ** Sizes and pointers of various parts of the Parse object. 
*/ -#define PARSE_HDR_SZ offsetof(Parse,aTempReg) /* Recursive part w/o aColCache*/ +#define PARSE_HDR(X) (((char*)(X))+offsetof(Parse,zErrMsg)) +#define PARSE_HDR_SZ (offsetof(Parse,aTempReg)-offsetof(Parse,zErrMsg)) /* Recursive part w/o aColCache*/ #define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Recursive part */ #define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */ #define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */ @@ -19330,8 +19122,10 @@ typedef struct { /* ** Allowed values for mInitFlags */ +#define INITFLAG_AlterMask 0x0003 /* Types of ALTER */ #define INITFLAG_AlterRename 0x0001 /* Reparse after a RENAME */ #define INITFLAG_AlterDrop 0x0002 /* Reparse after a DROP COLUMN */ +#define INITFLAG_AlterAdd 0x0003 /* Reparse after an ADD COLUMN */ /* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled ** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning @@ -19410,6 +19204,7 @@ struct Sqlite3Config { int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ #endif int bLocaltimeFault; /* True to fail localtime() calls */ + int (*xAltLocaltime)(const void*,void*); /* Alternative localtime() routine */ int iOnceResetThreshold; /* When to reset OP_Once counters */ u32 szSorterRef; /* Min size in bytes to use sorter-refs */ unsigned int iPrngSeed; /* Alternative fixed seed for the PRNG */ @@ -19452,8 +19247,8 @@ struct Walker { int n; /* A counter */ int iCur; /* A cursor number */ SrcList *pSrcList; /* FROM clause */ - struct SrcCount *pSrcCount; /* Counting column references */ struct CCurHint *pCCurHint; /* Used by codeCursorHint() */ + struct RefSrcList *pRefSrcList; /* sqlite3ReferencesSrcList() */ int *aiCol; /* array of column indexes */ struct IdxCover *pIdxCover; /* Check for index coverage */ struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */ @@ -19638,7 +19433,7 @@ SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p); SQLITE_PRIVATE Window *sqlite3WindowAlloc(Parse*, int, int, Expr*, int , Expr*, u8); SQLITE_PRIVATE void sqlite3WindowAttach(Parse*, Expr*, Window*); SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin); -SQLITE_PRIVATE int sqlite3WindowCompare(Parse*, Window*, Window*, int); +SQLITE_PRIVATE int sqlite3WindowCompare(const Parse*, const Window*, const Window*, int); SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse*, Select*); SQLITE_PRIVATE void sqlite3WindowCodeStep(Parse*, Select*, WhereInfo*, int, int); SQLITE_PRIVATE int sqlite3WindowRewrite(Parse*, Select*); @@ -19770,8 +19565,8 @@ SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64); SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64); SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*); SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3*, void*); -SQLITE_PRIVATE int sqlite3MallocSize(void*); -SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*); +SQLITE_PRIVATE int sqlite3MallocSize(const void*); +SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, const void*); SQLITE_PRIVATE void *sqlite3PageMalloc(int); SQLITE_PRIVATE void sqlite3PageFree(void*); SQLITE_PRIVATE void sqlite3MemSetDefault(void); @@ -19887,9 +19682,10 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*); SQLITE_PRIVATE void sqlite3DequoteExpr(Expr*); +SQLITE_PRIVATE void sqlite3DequoteToken(Token*); SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); SQLITE_PRIVATE int 
sqlite3KeywordCode(const unsigned char*, int); -SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **); +SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*); SQLITE_PRIVATE void sqlite3FinishCoding(Parse*); SQLITE_PRIVATE int sqlite3GetTempReg(Parse*); SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); @@ -19906,16 +19702,17 @@ SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*); SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*); SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*); -SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*, int); -SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,Expr*,FuncDef*); +SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int); +SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); +SQLITE_PRIVATE Select *sqlite3ExprListToValues(Parse*, int, ExprList*); SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int); -SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int); +SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int); SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*); SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); @@ -19931,6 +19728,10 @@ SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*); SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*); +SQLITE_PRIVATE void sqlite3ColumnSetExpr(Parse*,Table*,Column*,Expr*); +SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table*,Column*); +SQLITE_PRIVATE void sqlite3ColumnSetColl(sqlite3*,Column*,const char*zColl); +SQLITE_PRIVATE const char *sqlite3ColumnColl(Column*); SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect); SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); @@ -19952,14 +19753,14 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table*, Column*); #else # define sqlite3ColumnPropertiesFromName(T,C) /* no-op */ #endif -SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*,Token*); +SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token,Token); SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int); SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*, const char*, const char*); SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,Expr*,const char*,const char*); SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); SQLITE_PRIVATE void sqlite3AddGenerated(Parse*,Expr*,Token*); -SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); +SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u32,Select*); SQLITE_PRIVATE void sqlite3AddReturning(Parse*,ExprList*); SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, 
sqlite3_vfs**,char**,char **); @@ -20045,10 +19846,12 @@ SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*); #endif +SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe*,int,const char*); SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*, ExprList*, Expr*); SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*,Expr*,int,ExprList*,Expr*, Upsert*); -SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int); +SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*, + ExprList*,Select*,u16,int); SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*); SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); @@ -20069,7 +19872,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int); SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); #ifndef SQLITE_OMIT_GENERATED_COLUMNS -SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(Parse*, Column*, int); +SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(Parse*, Table*, Column*, int); #endif SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int); @@ -20088,23 +19891,24 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); #define LOCATE_VIEW 0x01 #define LOCATE_NOERR 0x02 SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*); +SQLITE_PRIVATE const char *sqlite3PreferredTableName(const char*); SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,SrcItem *); SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3Vacuum(Parse*,Token*,Expr*); SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*, int, sqlite3_value*); -SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*); -SQLITE_PRIVATE int sqlite3ExprCompare(Parse*,Expr*, Expr*, int); -SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*, Expr*, int); -SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int); -SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse*,Expr*, Expr*, int); +SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, const Token*); +SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int); +SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int); +SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int); +SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int); SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int); SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx); -SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); +SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse*, Expr*, SrcList*); SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); #ifndef SQLITE_UNTESTABLE SQLITE_PRIVATE void sqlite3PrngSaveState(void); @@ -20126,10 
+19930,11 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); +SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr*,const SrcItem*); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif -SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*); +SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*); SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); @@ -20154,17 +19959,22 @@ SQLITE_PRIVATE void sqlite3MayAbort(Parse*); SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8); SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*); SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*); -SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int); -SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int); -SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int); -SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*); -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int); +SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,const Expr*,int); +SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,const ExprList*,int); +SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,const SrcList*,int); +SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,const IdList*); +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,const Select*,int); SQLITE_PRIVATE FuncDef *sqlite3FunctionSearch(int,const char*); SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int); SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8); +SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum*,sqlite3_value*); SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void); SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void); +SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void); SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*); +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) +SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3*); +#endif SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*); SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); @@ -20254,14 +20064,8 @@ SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); SQLITE_PRIVATE LogEst sqlite3LogEst(u64); SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); -#ifndef SQLITE_OMIT_VIRTUALTABLE SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); -#endif -#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ - defined(SQLITE_ENABLE_STAT4) || \ - defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst); -#endif SQLITE_PRIVATE VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int); SQLITE_PRIVATE const char *sqlite3VListNumToName(VList*,int); SQLITE_PRIVATE int sqlite3VListNameToNum(VList*,const char*,int); @@ -20296,7 +20100,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*); SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int); SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2); SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity); -SQLITE_PRIVATE char 
sqlite3TableColumnAffinity(Table*,int); +SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table*,int); SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr); SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); @@ -20325,14 +20129,14 @@ SQLITE_PRIVATE void sqlite3SetTextEncoding(sqlite3 *db, u8); SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr); SQLITE_PRIVATE CollSeq *sqlite3ExprNNCollSeq(Parse *pParse, const Expr *pExpr); SQLITE_PRIVATE int sqlite3ExprCollSeqMatch(Parse*,const Expr*,const Expr*); -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int); -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*); +SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(const Parse *pParse, Expr*, const Token*, int); +SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(const Parse*,Expr*,const char*); SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*); SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr*); SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *); SQLITE_PRIVATE int sqlite3WritableSchema(sqlite3*); SQLITE_PRIVATE int sqlite3CheckObjectName(Parse*, const char*,const char*,const char*); -SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int); +SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, i64); SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64); SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64); SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64); @@ -20357,11 +20161,15 @@ SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *); #ifndef SQLITE_OMIT_UTF16 SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8); #endif -SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **); +SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, const Expr *, u8, u8, sqlite3_value **); SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); #ifndef SQLITE_AMALGAMATION SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; SQLITE_PRIVATE const char sqlite3StrBINARY[]; +SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[]; +SQLITE_PRIVATE const char sqlite3StdTypeAffinity[]; +SQLITE_PRIVATE const char sqlite3StdTypeMap[]; +SQLITE_PRIVATE const char *sqlite3StdType[]; SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; SQLITE_PRIVATE const unsigned char *sqlite3aLTb; SQLITE_PRIVATE const unsigned char *sqlite3aEQb; @@ -20405,9 +20213,9 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); -SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse*, SrcList*, Token*); -SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse*, void*, Token*); -SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, void *pTo, void *pFrom); +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse*, SrcList*, const Token*); +SQLITE_PRIVATE const void *sqlite3RenameTokenMap(Parse*, const void*, const Token*); +SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, const void *pTo, const void *pFrom); SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse*, Expr*); SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse*, ExprList*); SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*); @@ -20444,15 +20252,20 @@ SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *, 
FuncDestructor *pDestructor ); SQLITE_PRIVATE void sqlite3NoopDestructor(void*); -SQLITE_PRIVATE void sqlite3OomFault(sqlite3*); +SQLITE_PRIVATE void *sqlite3OomFault(sqlite3*); SQLITE_PRIVATE void sqlite3OomClear(sqlite3*); SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, int); SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); +SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum*, u8); +SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context*,StrAccum*); SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int); SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int); +SQLITE_PRIVATE void sqlite3RecordErrorByteOffset(sqlite3*,const char*); +SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3*,const Expr*); SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *); SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *); @@ -20503,7 +20316,7 @@ SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*); #endif #ifdef SQLITE_OMIT_VIRTUALTABLE -# define sqlite3VtabClear(Y) +# define sqlite3VtabClear(D,T) # define sqlite3VtabSync(X,Y) SQLITE_OK # define sqlite3VtabRollback(X) # define sqlite3VtabCommit(X) @@ -20540,9 +20353,11 @@ SQLITE_PRIVATE int sqlite3ReadOnlyShadowTables(sqlite3 *db); #ifndef SQLITE_OMIT_VIRTUALTABLE SQLITE_PRIVATE int sqlite3ShadowTableName(sqlite3 *db, const char *zName); SQLITE_PRIVATE int sqlite3IsShadowTableOf(sqlite3*,Table*,const char*); +SQLITE_PRIVATE void sqlite3MarkAllShadowTablesOf(sqlite3*, Table*); #else # define sqlite3ShadowTableName(A,B) 0 # define sqlite3IsShadowTableOf(A,B,C) 0 +# define sqlite3MarkAllShadowTablesOf(A,B) #endif SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*); SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3*,Module*); @@ -20555,11 +20370,17 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **); SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*); SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *); SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); + SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); +#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ + && !defined(SQLITE_OMIT_VIRTUALTABLE) +SQLITE_PRIVATE void sqlite3VtabWriteAll(sqlite3_index_info*); +#endif SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); -SQLITE_PRIVATE void sqlite3ParserReset(Parse*); +SQLITE_PRIVATE void sqlite3ParseObjectInit(Parse*,sqlite3*); +SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse*); SQLITE_PRIVATE void *sqlite3ParserAddCleanup(Parse*,void(*)(sqlite3*,void*),void*); #ifdef SQLITE_ENABLE_NORMALIZE SQLITE_PRIVATE char *sqlite3Normalize(Vdbe*, const char*); @@ -20585,7 +20406,7 @@ SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); # define sqlite3CteDelete(D,C) # define sqlite3CteWithAdd(P,W,C) ((void*)0) # define sqlite3WithDelete(x,y) -# define sqlite3WithPush(x,y,z) +# define sqlite3WithPush(x,y,z) ((void*)0) #endif #ifndef SQLITE_OMIT_UPSERT SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*); @@ -20618,6 +20439,7 @@ SQLITE_PRIVATE void sqlite3FkActions(Parse*, 
Table*, ExprList*, int, int*, int SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int); SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*); SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); +SQLITE_PRIVATE void sqlite3FkClearTriggerCache(sqlite3*,int); #else #define sqlite3FkActions(a,b,c,d,e,f) #define sqlite3FkCheck(a,b,c,d,e,f) @@ -20625,6 +20447,7 @@ SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); #define sqlite3FkOldmask(a,b) 0 #define sqlite3FkRequired(a,b,c,d) 0 #define sqlite3FkReferences(a) 0 + #define sqlite3FkClearTriggerCache(a,b) #endif #ifndef SQLITE_OMIT_FOREIGN_KEY SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*); @@ -20682,7 +20505,7 @@ SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *); SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p); #if SQLITE_MAX_EXPR_DEPTH>0 -SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *); +SQLITE_PRIVATE int sqlite3SelectExprHeight(const Select *); SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int); #else #define sqlite3SelectExprHeight(x) 0 @@ -20753,8 +20576,8 @@ SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...); */ #ifdef SQLITE_MEMDEBUG SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8); -SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8); -SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8); +SQLITE_PRIVATE int sqlite3MemdebugHasType(const void*,u8); +SQLITE_PRIVATE int sqlite3MemdebugNoType(const void*,u8); #else # define sqlite3MemdebugSetType(X,Y) /* no-op */ # define sqlite3MemdebugHasType(X,Y) 1 @@ -20779,10 +20602,10 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3*); SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*); #endif -SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr); -SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprVectorSize(const Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprIsVector(const Expr *pExpr); SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int); -SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int); +SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int,int); SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*); #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS @@ -20792,6 +20615,993 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt); #endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/ +/************** Begin file os_common.h ***************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains macros and a little bit of code that is common to +** all of the platform-specific files (os_*.c) and is #included into those +** files. +** +** This file should be #included by the os_*.c files only. It is not a +** general purpose header file. +*/ +#ifndef _OS_COMMON_H_ +#define _OS_COMMON_H_ + +/* +** At least two bugs have slipped in because we changed the MEMORY_DEBUG +** macro to SQLITE_DEBUG and some older makefiles have not yet made the +** switch. The following code should catch this problem at compile-time. +*/ +#ifdef MEMORY_DEBUG +# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." 
+#endif + +/* +** Macros for performance tracing. Normally turned off. Only works +** on i486 hardware. +*/ +#ifdef SQLITE_PERFORMANCE_TRACE + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +/************** Include hwtime.h in the middle of os_common.h ****************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in os_common.h ******************/ + +static sqlite_uint64 g_start; +static sqlite_uint64 g_elapsed; +#define TIMER_START g_start=sqlite3Hwtime() +#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start +#define TIMER_ELAPSED g_elapsed +#else +#define TIMER_START +#define TIMER_END +#define TIMER_ELAPSED ((sqlite_uint64)0) +#endif + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. 
+*/ +#if defined(SQLITE_TEST) +SQLITE_API extern int sqlite3_io_error_hit; +SQLITE_API extern int sqlite3_io_error_hardhit; +SQLITE_API extern int sqlite3_io_error_pending; +SQLITE_API extern int sqlite3_io_error_persist; +SQLITE_API extern int sqlite3_io_error_benign; +SQLITE_API extern int sqlite3_diskfull_pending; +SQLITE_API extern int sqlite3_diskfull; +#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) +#define SimulateIOError(CODE) \ + if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ + || sqlite3_io_error_pending-- == 1 ) \ + { local_ioerr(); CODE; } +static void local_ioerr(){ + IOTRACE(("IOERR\n")); + sqlite3_io_error_hit++; + if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; +} +#define SimulateDiskfullError(CODE) \ + if( sqlite3_diskfull_pending ){ \ + if( sqlite3_diskfull_pending == 1 ){ \ + local_ioerr(); \ + sqlite3_diskfull = 1; \ + sqlite3_io_error_hit = 1; \ + CODE; \ + }else{ \ + sqlite3_diskfull_pending--; \ + } \ + } +#else +#define SimulateIOErrorBenign(X) +#define SimulateIOError(A) +#define SimulateDiskfullError(A) +#endif /* defined(SQLITE_TEST) */ + +/* +** When testing, keep a count of the number of open files. +*/ +#if defined(SQLITE_TEST) +SQLITE_API extern int sqlite3_open_file_count; +#define OpenCounter(X) sqlite3_open_file_count+=(X) +#else +#define OpenCounter(X) +#endif /* defined(SQLITE_TEST) */ + +#endif /* !defined(_OS_COMMON_H_) */ + +/************** End of os_common.h *******************************************/ +/************** Begin file ctime.c *******************************************/ +/* DO NOT EDIT! +** This file is automatically generated by the script in the canonical +** SQLite source tree at tool/mkctimec.tcl. +** +** To modify this header, edit any of the various lists in that script +** which specify categories of generated conditionals in this file. +*/ + +/* +** 2010 February 23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file implements routines used to report what compile-time options +** SQLite was built with. +*/ +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* IMP: R-16824-07538 */ + +/* +** Include the configuration header output by 'configure' if we're using the +** autoconf-based build +*/ +#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) +/* #include "config.h" */ +#define SQLITECONFIG_H 1 +#endif + +/* These macros are provided to "stringify" the value of the define +** for those options in which the value is meaningful. */ +#define CTIMEOPT_VAL_(opt) #opt +#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt) + +/* Like CTIMEOPT_VAL, but especially for SQLITE_DEFAULT_LOOKASIDE. This +** option requires a separate macro because legal values contain a single +** comma. e.g. (-DSQLITE_DEFAULT_LOOKASIDE="100,100") */ +#define CTIMEOPT_VAL2_(opt1,opt2) #opt1 "," #opt2 +#define CTIMEOPT_VAL2(opt) CTIMEOPT_VAL2_(opt) +/* #include "sqliteInt.h" */ + +/* +** An array of names of all compile-time options. This array should +** be sorted A-Z. +** +** This array looks large, but in a typical installation actually uses +** only a handful of compile-time options, so most times this array is usually +** rather short and uses little memory space. 
+*/ +static const char * const sqlite3azCompileOpt[] = { + +#ifdef SQLITE_32BIT_ROWID + "32BIT_ROWID", +#endif +#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC + "4_BYTE_ALIGNED_MALLOC", +#endif +#ifdef SQLITE_64BIT_STATS + "64BIT_STATS", +#endif +#ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN +# if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 + "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN), +# endif +#endif +#ifdef SQLITE_ALLOW_URI_AUTHORITY + "ALLOW_URI_AUTHORITY", +#endif +#ifdef SQLITE_ATOMIC_INTRINSICS + "ATOMIC_INTRINSICS=" CTIMEOPT_VAL(SQLITE_ATOMIC_INTRINSICS), +#endif +#ifdef SQLITE_BITMASK_TYPE + "BITMASK_TYPE=" CTIMEOPT_VAL(SQLITE_BITMASK_TYPE), +#endif +#ifdef SQLITE_BUG_COMPATIBLE_20160819 + "BUG_COMPATIBLE_20160819", +#endif +#ifdef SQLITE_CASE_SENSITIVE_LIKE + "CASE_SENSITIVE_LIKE", +#endif +#ifdef SQLITE_CHECK_PAGES + "CHECK_PAGES", +#endif +#if defined(__clang__) && defined(__clang_major__) + "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "." + CTIMEOPT_VAL(__clang_minor__) "." + CTIMEOPT_VAL(__clang_patchlevel__), +#elif defined(_MSC_VER) + "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER), +#elif defined(__GNUC__) && defined(__VERSION__) + "COMPILER=gcc-" __VERSION__, +#endif +#ifdef SQLITE_COVERAGE_TEST + "COVERAGE_TEST", +#endif +#ifdef SQLITE_DEBUG + "DEBUG", +#endif +#ifdef SQLITE_DEFAULT_AUTOMATIC_INDEX + "DEFAULT_AUTOMATIC_INDEX", +#endif +#ifdef SQLITE_DEFAULT_AUTOVACUUM + "DEFAULT_AUTOVACUUM", +#endif +#ifdef SQLITE_DEFAULT_CACHE_SIZE + "DEFAULT_CACHE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_CACHE_SIZE), +#endif +#ifdef SQLITE_DEFAULT_CKPTFULLFSYNC + "DEFAULT_CKPTFULLFSYNC", +#endif +#ifdef SQLITE_DEFAULT_FILE_FORMAT + "DEFAULT_FILE_FORMAT=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_FORMAT), +#endif +#ifdef SQLITE_DEFAULT_FILE_PERMISSIONS + "DEFAULT_FILE_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_FILE_PERMISSIONS), +#endif +#ifdef SQLITE_DEFAULT_FOREIGN_KEYS + "DEFAULT_FOREIGN_KEYS", +#endif +#ifdef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT + "DEFAULT_JOURNAL_SIZE_LIMIT=" CTIMEOPT_VAL(SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT), +#endif +#ifdef SQLITE_DEFAULT_LOCKING_MODE + "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE), +#endif +#ifdef SQLITE_DEFAULT_LOOKASIDE + "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL2(SQLITE_DEFAULT_LOOKASIDE), +#endif +#ifdef SQLITE_DEFAULT_MEMSTATUS +# if SQLITE_DEFAULT_MEMSTATUS != 1 + "DEFAULT_MEMSTATUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_MEMSTATUS), +# endif +#endif +#ifdef SQLITE_DEFAULT_MMAP_SIZE + "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), +#endif +#ifdef SQLITE_DEFAULT_PAGE_SIZE + "DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_PAGE_SIZE), +#endif +#ifdef SQLITE_DEFAULT_PCACHE_INITSZ + "DEFAULT_PCACHE_INITSZ=" CTIMEOPT_VAL(SQLITE_DEFAULT_PCACHE_INITSZ), +#endif +#ifdef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS + "DEFAULT_PROXYDIR_PERMISSIONS=" CTIMEOPT_VAL(SQLITE_DEFAULT_PROXYDIR_PERMISSIONS), +#endif +#ifdef SQLITE_DEFAULT_RECURSIVE_TRIGGERS + "DEFAULT_RECURSIVE_TRIGGERS", +#endif +#ifdef SQLITE_DEFAULT_ROWEST + "DEFAULT_ROWEST=" CTIMEOPT_VAL(SQLITE_DEFAULT_ROWEST), +#endif +#ifdef SQLITE_DEFAULT_SECTOR_SIZE + "DEFAULT_SECTOR_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_SECTOR_SIZE), +#endif +#ifdef SQLITE_DEFAULT_SYNCHRONOUS + "DEFAULT_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_SYNCHRONOUS), +#endif +#ifdef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT + "DEFAULT_WAL_AUTOCHECKPOINT=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_AUTOCHECKPOINT), +#endif +#ifdef SQLITE_DEFAULT_WAL_SYNCHRONOUS + "DEFAULT_WAL_SYNCHRONOUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WAL_SYNCHRONOUS), +#endif +#ifdef 
SQLITE_DEFAULT_WORKER_THREADS + "DEFAULT_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_DEFAULT_WORKER_THREADS), +#endif +#ifdef SQLITE_DIRECT_OVERFLOW_READ + "DIRECT_OVERFLOW_READ", +#endif +#ifdef SQLITE_DISABLE_DIRSYNC + "DISABLE_DIRSYNC", +#endif +#ifdef SQLITE_DISABLE_FTS3_UNICODE + "DISABLE_FTS3_UNICODE", +#endif +#ifdef SQLITE_DISABLE_FTS4_DEFERRED + "DISABLE_FTS4_DEFERRED", +#endif +#ifdef SQLITE_DISABLE_INTRINSIC + "DISABLE_INTRINSIC", +#endif +#ifdef SQLITE_DISABLE_LFS + "DISABLE_LFS", +#endif +#ifdef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS + "DISABLE_PAGECACHE_OVERFLOW_STATS", +#endif +#ifdef SQLITE_DISABLE_SKIPAHEAD_DISTINCT + "DISABLE_SKIPAHEAD_DISTINCT", +#endif +#ifdef SQLITE_ENABLE_8_3_NAMES + "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + "ENABLE_API_ARMOR", +#endif +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + "ENABLE_ATOMIC_WRITE", +#endif +#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE + "ENABLE_BATCH_ATOMIC_WRITE", +#endif +#ifdef SQLITE_ENABLE_BYTECODE_VTAB + "ENABLE_BYTECODE_VTAB", +#endif +#ifdef SQLITE_ENABLE_CEROD + "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD), +#endif +#ifdef SQLITE_ENABLE_COLUMN_METADATA + "ENABLE_COLUMN_METADATA", +#endif +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + "ENABLE_COLUMN_USED_MASK", +#endif +#ifdef SQLITE_ENABLE_COSTMULT + "ENABLE_COSTMULT", +#endif +#ifdef SQLITE_ENABLE_CURSOR_HINTS + "ENABLE_CURSOR_HINTS", +#endif +#ifdef SQLITE_ENABLE_DBPAGE_VTAB + "ENABLE_DBPAGE_VTAB", +#endif +#ifdef SQLITE_ENABLE_DBSTAT_VTAB + "ENABLE_DBSTAT_VTAB", +#endif +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + "ENABLE_EXPENSIVE_ASSERT", +#endif +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + "ENABLE_EXPLAIN_COMMENTS", +#endif +#ifdef SQLITE_ENABLE_FTS3 + "ENABLE_FTS3", +#endif +#ifdef SQLITE_ENABLE_FTS3_PARENTHESIS + "ENABLE_FTS3_PARENTHESIS", +#endif +#ifdef SQLITE_ENABLE_FTS3_TOKENIZER + "ENABLE_FTS3_TOKENIZER", +#endif +#ifdef SQLITE_ENABLE_FTS4 + "ENABLE_FTS4", +#endif +#ifdef SQLITE_ENABLE_FTS5 + "ENABLE_FTS5", +#endif +#ifdef SQLITE_ENABLE_GEOPOLY + "ENABLE_GEOPOLY", +#endif +#ifdef SQLITE_ENABLE_HIDDEN_COLUMNS + "ENABLE_HIDDEN_COLUMNS", +#endif +#ifdef SQLITE_ENABLE_ICU + "ENABLE_ICU", +#endif +#ifdef SQLITE_ENABLE_IOTRACE + "ENABLE_IOTRACE", +#endif +#ifdef SQLITE_ENABLE_LOAD_EXTENSION + "ENABLE_LOAD_EXTENSION", +#endif +#ifdef SQLITE_ENABLE_LOCKING_STYLE + "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), +#endif +#ifdef SQLITE_ENABLE_MATH_FUNCTIONS + "ENABLE_MATH_FUNCTIONS", +#endif +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + "ENABLE_MEMORY_MANAGEMENT", +#endif +#ifdef SQLITE_ENABLE_MEMSYS3 + "ENABLE_MEMSYS3", +#endif +#ifdef SQLITE_ENABLE_MEMSYS5 + "ENABLE_MEMSYS5", +#endif +#ifdef SQLITE_ENABLE_MULTIPLEX + "ENABLE_MULTIPLEX", +#endif +#ifdef SQLITE_ENABLE_NORMALIZE + "ENABLE_NORMALIZE", +#endif +#ifdef SQLITE_ENABLE_NULL_TRIM + "ENABLE_NULL_TRIM", +#endif +#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC + "ENABLE_OFFSET_SQL_FUNC", +#endif +#ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK + "ENABLE_OVERSIZE_CELL_CHECK", +#endif +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + "ENABLE_PREUPDATE_HOOK", +#endif +#ifdef SQLITE_ENABLE_QPSG + "ENABLE_QPSG", +#endif +#ifdef SQLITE_ENABLE_RBU + "ENABLE_RBU", +#endif +#ifdef SQLITE_ENABLE_RTREE + "ENABLE_RTREE", +#endif +#ifdef SQLITE_ENABLE_SELECTTRACE + "ENABLE_SELECTTRACE", +#endif +#ifdef SQLITE_ENABLE_SESSION + "ENABLE_SESSION", +#endif +#ifdef SQLITE_ENABLE_SNAPSHOT + "ENABLE_SNAPSHOT", +#endif +#ifdef SQLITE_ENABLE_SORTER_REFERENCES + "ENABLE_SORTER_REFERENCES", +#endif +#ifdef 
SQLITE_ENABLE_SQLLOG + "ENABLE_SQLLOG", +#endif +#ifdef SQLITE_ENABLE_STAT4 + "ENABLE_STAT4", +#endif +#ifdef SQLITE_ENABLE_STMTVTAB + "ENABLE_STMTVTAB", +#endif +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + "ENABLE_STMT_SCANSTATUS", +#endif +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + "ENABLE_UNKNOWN_SQL_FUNCTION", +#endif +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + "ENABLE_UNLOCK_NOTIFY", +#endif +#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT + "ENABLE_UPDATE_DELETE_LIMIT", +#endif +#ifdef SQLITE_ENABLE_URI_00_ERROR + "ENABLE_URI_00_ERROR", +#endif +#ifdef SQLITE_ENABLE_VFSTRACE + "ENABLE_VFSTRACE", +#endif +#ifdef SQLITE_ENABLE_WHERETRACE + "ENABLE_WHERETRACE", +#endif +#ifdef SQLITE_ENABLE_ZIPVFS + "ENABLE_ZIPVFS", +#endif +#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS + "EXPLAIN_ESTIMATED_ROWS", +#endif +#ifdef SQLITE_EXTRA_IFNULLROW + "EXTRA_IFNULLROW", +#endif +#ifdef SQLITE_EXTRA_INIT + "EXTRA_INIT=" CTIMEOPT_VAL(SQLITE_EXTRA_INIT), +#endif +#ifdef SQLITE_EXTRA_SHUTDOWN + "EXTRA_SHUTDOWN=" CTIMEOPT_VAL(SQLITE_EXTRA_SHUTDOWN), +#endif +#ifdef SQLITE_FTS3_MAX_EXPR_DEPTH + "FTS3_MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_FTS3_MAX_EXPR_DEPTH), +#endif +#ifdef SQLITE_FTS5_ENABLE_TEST_MI + "FTS5_ENABLE_TEST_MI", +#endif +#ifdef SQLITE_FTS5_NO_WITHOUT_ROWID + "FTS5_NO_WITHOUT_ROWID", +#endif +#if HAVE_ISNAN || SQLITE_HAVE_ISNAN + "HAVE_ISNAN", +#endif +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX +# if SQLITE_HOMEGROWN_RECURSIVE_MUTEX != 1 + "HOMEGROWN_RECURSIVE_MUTEX=" CTIMEOPT_VAL(SQLITE_HOMEGROWN_RECURSIVE_MUTEX), +# endif +#endif +#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS + "IGNORE_AFP_LOCK_ERRORS", +#endif +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + "IGNORE_FLOCK_LOCK_ERRORS", +#endif +#ifdef SQLITE_INLINE_MEMCPY + "INLINE_MEMCPY", +#endif +#ifdef SQLITE_INT64_TYPE + "INT64_TYPE", +#endif +#ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX + "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX), +#endif +#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS + "LIKE_DOESNT_MATCH_BLOBS", +#endif +#ifdef SQLITE_LOCK_TRACE + "LOCK_TRACE", +#endif +#ifdef SQLITE_LOG_CACHE_SPILL + "LOG_CACHE_SPILL", +#endif +#ifdef SQLITE_MALLOC_SOFT_LIMIT + "MALLOC_SOFT_LIMIT=" CTIMEOPT_VAL(SQLITE_MALLOC_SOFT_LIMIT), +#endif +#ifdef SQLITE_MAX_ATTACHED + "MAX_ATTACHED=" CTIMEOPT_VAL(SQLITE_MAX_ATTACHED), +#endif +#ifdef SQLITE_MAX_COLUMN + "MAX_COLUMN=" CTIMEOPT_VAL(SQLITE_MAX_COLUMN), +#endif +#ifdef SQLITE_MAX_COMPOUND_SELECT + "MAX_COMPOUND_SELECT=" CTIMEOPT_VAL(SQLITE_MAX_COMPOUND_SELECT), +#endif +#ifdef SQLITE_MAX_DEFAULT_PAGE_SIZE + "MAX_DEFAULT_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_DEFAULT_PAGE_SIZE), +#endif +#ifdef SQLITE_MAX_EXPR_DEPTH + "MAX_EXPR_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_EXPR_DEPTH), +#endif +#ifdef SQLITE_MAX_FUNCTION_ARG + "MAX_FUNCTION_ARG=" CTIMEOPT_VAL(SQLITE_MAX_FUNCTION_ARG), +#endif +#ifdef SQLITE_MAX_LENGTH + "MAX_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LENGTH), +#endif +#ifdef SQLITE_MAX_LIKE_PATTERN_LENGTH + "MAX_LIKE_PATTERN_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_LIKE_PATTERN_LENGTH), +#endif +#ifdef SQLITE_MAX_MEMORY + "MAX_MEMORY=" CTIMEOPT_VAL(SQLITE_MAX_MEMORY), +#endif +#ifdef SQLITE_MAX_MMAP_SIZE + "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE), +#endif +#ifdef SQLITE_MAX_MMAP_SIZE_ + "MAX_MMAP_SIZE_=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE_), +#endif +#ifdef SQLITE_MAX_PAGE_COUNT + "MAX_PAGE_COUNT=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_COUNT), +#endif +#ifdef SQLITE_MAX_PAGE_SIZE + "MAX_PAGE_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_PAGE_SIZE), +#endif +#ifdef SQLITE_MAX_SCHEMA_RETRY + "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY), 
+#endif +#ifdef SQLITE_MAX_SQL_LENGTH + "MAX_SQL_LENGTH=" CTIMEOPT_VAL(SQLITE_MAX_SQL_LENGTH), +#endif +#ifdef SQLITE_MAX_TRIGGER_DEPTH + "MAX_TRIGGER_DEPTH=" CTIMEOPT_VAL(SQLITE_MAX_TRIGGER_DEPTH), +#endif +#ifdef SQLITE_MAX_VARIABLE_NUMBER + "MAX_VARIABLE_NUMBER=" CTIMEOPT_VAL(SQLITE_MAX_VARIABLE_NUMBER), +#endif +#ifdef SQLITE_MAX_VDBE_OP + "MAX_VDBE_OP=" CTIMEOPT_VAL(SQLITE_MAX_VDBE_OP), +#endif +#ifdef SQLITE_MAX_WORKER_THREADS + "MAX_WORKER_THREADS=" CTIMEOPT_VAL(SQLITE_MAX_WORKER_THREADS), +#endif +#ifdef SQLITE_MEMDEBUG + "MEMDEBUG", +#endif +#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT + "MIXED_ENDIAN_64BIT_FLOAT", +#endif +#ifdef SQLITE_MMAP_READWRITE + "MMAP_READWRITE", +#endif +#ifdef SQLITE_MUTEX_NOOP + "MUTEX_NOOP", +#endif +#ifdef SQLITE_MUTEX_OMIT + "MUTEX_OMIT", +#endif +#ifdef SQLITE_MUTEX_PTHREADS + "MUTEX_PTHREADS", +#endif +#ifdef SQLITE_MUTEX_W32 + "MUTEX_W32", +#endif +#ifdef SQLITE_NEED_ERR_NAME + "NEED_ERR_NAME", +#endif +#ifdef SQLITE_NO_SYNC + "NO_SYNC", +#endif +#ifdef SQLITE_OMIT_ALTERTABLE + "OMIT_ALTERTABLE", +#endif +#ifdef SQLITE_OMIT_ANALYZE + "OMIT_ANALYZE", +#endif +#ifdef SQLITE_OMIT_ATTACH + "OMIT_ATTACH", +#endif +#ifdef SQLITE_OMIT_AUTHORIZATION + "OMIT_AUTHORIZATION", +#endif +#ifdef SQLITE_OMIT_AUTOINCREMENT + "OMIT_AUTOINCREMENT", +#endif +#ifdef SQLITE_OMIT_AUTOINIT + "OMIT_AUTOINIT", +#endif +#ifdef SQLITE_OMIT_AUTOMATIC_INDEX + "OMIT_AUTOMATIC_INDEX", +#endif +#ifdef SQLITE_OMIT_AUTORESET + "OMIT_AUTORESET", +#endif +#ifdef SQLITE_OMIT_AUTOVACUUM + "OMIT_AUTOVACUUM", +#endif +#ifdef SQLITE_OMIT_BETWEEN_OPTIMIZATION + "OMIT_BETWEEN_OPTIMIZATION", +#endif +#ifdef SQLITE_OMIT_BLOB_LITERAL + "OMIT_BLOB_LITERAL", +#endif +#ifdef SQLITE_OMIT_CAST + "OMIT_CAST", +#endif +#ifdef SQLITE_OMIT_CHECK + "OMIT_CHECK", +#endif +#ifdef SQLITE_OMIT_COMPLETE + "OMIT_COMPLETE", +#endif +#ifdef SQLITE_OMIT_COMPOUND_SELECT + "OMIT_COMPOUND_SELECT", +#endif +#ifdef SQLITE_OMIT_CONFLICT_CLAUSE + "OMIT_CONFLICT_CLAUSE", +#endif +#ifdef SQLITE_OMIT_CTE + "OMIT_CTE", +#endif +#if defined(SQLITE_OMIT_DATETIME_FUNCS) || defined(SQLITE_OMIT_FLOATING_POINT) + "OMIT_DATETIME_FUNCS", +#endif +#ifdef SQLITE_OMIT_DECLTYPE + "OMIT_DECLTYPE", +#endif +#ifdef SQLITE_OMIT_DEPRECATED + "OMIT_DEPRECATED", +#endif +#ifdef SQLITE_OMIT_DESERIALIZE + "OMIT_DESERIALIZE", +#endif +#ifdef SQLITE_OMIT_DISKIO + "OMIT_DISKIO", +#endif +#ifdef SQLITE_OMIT_EXPLAIN + "OMIT_EXPLAIN", +#endif +#ifdef SQLITE_OMIT_FLAG_PRAGMAS + "OMIT_FLAG_PRAGMAS", +#endif +#ifdef SQLITE_OMIT_FLOATING_POINT + "OMIT_FLOATING_POINT", +#endif +#ifdef SQLITE_OMIT_FOREIGN_KEY + "OMIT_FOREIGN_KEY", +#endif +#ifdef SQLITE_OMIT_GET_TABLE + "OMIT_GET_TABLE", +#endif +#ifdef SQLITE_OMIT_HEX_INTEGER + "OMIT_HEX_INTEGER", +#endif +#ifdef SQLITE_OMIT_INCRBLOB + "OMIT_INCRBLOB", +#endif +#ifdef SQLITE_OMIT_INTEGRITY_CHECK + "OMIT_INTEGRITY_CHECK", +#endif +#ifdef SQLITE_OMIT_INTROSPECTION_PRAGMAS + "OMIT_INTROSPECTION_PRAGMAS", +#endif +#ifdef SQLITE_OMIT_JSON + "OMIT_JSON", +#endif +#ifdef SQLITE_OMIT_LIKE_OPTIMIZATION + "OMIT_LIKE_OPTIMIZATION", +#endif +#ifdef SQLITE_OMIT_LOAD_EXTENSION + "OMIT_LOAD_EXTENSION", +#endif +#ifdef SQLITE_OMIT_LOCALTIME + "OMIT_LOCALTIME", +#endif +#ifdef SQLITE_OMIT_LOOKASIDE + "OMIT_LOOKASIDE", +#endif +#ifdef SQLITE_OMIT_MEMORYDB + "OMIT_MEMORYDB", +#endif +#ifdef SQLITE_OMIT_OR_OPTIMIZATION + "OMIT_OR_OPTIMIZATION", +#endif +#ifdef SQLITE_OMIT_PAGER_PRAGMAS + "OMIT_PAGER_PRAGMAS", +#endif +#ifdef SQLITE_OMIT_PARSER_TRACE + "OMIT_PARSER_TRACE", +#endif +#ifdef SQLITE_OMIT_POPEN + "OMIT_POPEN", +#endif 
+#ifdef SQLITE_OMIT_PRAGMA + "OMIT_PRAGMA", +#endif +#ifdef SQLITE_OMIT_PROGRESS_CALLBACK + "OMIT_PROGRESS_CALLBACK", +#endif +#ifdef SQLITE_OMIT_QUICKBALANCE + "OMIT_QUICKBALANCE", +#endif +#ifdef SQLITE_OMIT_REINDEX + "OMIT_REINDEX", +#endif +#ifdef SQLITE_OMIT_SCHEMA_PRAGMAS + "OMIT_SCHEMA_PRAGMAS", +#endif +#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS + "OMIT_SCHEMA_VERSION_PRAGMAS", +#endif +#ifdef SQLITE_OMIT_SHARED_CACHE + "OMIT_SHARED_CACHE", +#endif +#ifdef SQLITE_OMIT_SHUTDOWN_DIRECTORIES + "OMIT_SHUTDOWN_DIRECTORIES", +#endif +#ifdef SQLITE_OMIT_SUBQUERY + "OMIT_SUBQUERY", +#endif +#ifdef SQLITE_OMIT_TCL_VARIABLE + "OMIT_TCL_VARIABLE", +#endif +#ifdef SQLITE_OMIT_TEMPDB + "OMIT_TEMPDB", +#endif +#ifdef SQLITE_OMIT_TEST_CONTROL + "OMIT_TEST_CONTROL", +#endif +#ifdef SQLITE_OMIT_TRACE +# if SQLITE_OMIT_TRACE != 1 + "OMIT_TRACE=" CTIMEOPT_VAL(SQLITE_OMIT_TRACE), +# endif +#endif +#ifdef SQLITE_OMIT_TRIGGER + "OMIT_TRIGGER", +#endif +#ifdef SQLITE_OMIT_TRUNCATE_OPTIMIZATION + "OMIT_TRUNCATE_OPTIMIZATION", +#endif +#ifdef SQLITE_OMIT_UTF16 + "OMIT_UTF16", +#endif +#ifdef SQLITE_OMIT_VACUUM + "OMIT_VACUUM", +#endif +#ifdef SQLITE_OMIT_VIEW + "OMIT_VIEW", +#endif +#ifdef SQLITE_OMIT_VIRTUALTABLE + "OMIT_VIRTUALTABLE", +#endif +#ifdef SQLITE_OMIT_WAL + "OMIT_WAL", +#endif +#ifdef SQLITE_OMIT_WSD + "OMIT_WSD", +#endif +#ifdef SQLITE_OMIT_XFER_OPT + "OMIT_XFER_OPT", +#endif +#ifdef SQLITE_PCACHE_SEPARATE_HEADER + "PCACHE_SEPARATE_HEADER", +#endif +#ifdef SQLITE_PERFORMANCE_TRACE + "PERFORMANCE_TRACE", +#endif +#ifdef SQLITE_POWERSAFE_OVERWRITE +# if SQLITE_POWERSAFE_OVERWRITE != 1 + "POWERSAFE_OVERWRITE=" CTIMEOPT_VAL(SQLITE_POWERSAFE_OVERWRITE), +# endif +#endif +#ifdef SQLITE_PREFER_PROXY_LOCKING + "PREFER_PROXY_LOCKING", +#endif +#ifdef SQLITE_PROXY_DEBUG + "PROXY_DEBUG", +#endif +#ifdef SQLITE_REVERSE_UNORDERED_SELECTS + "REVERSE_UNORDERED_SELECTS", +#endif +#ifdef SQLITE_RTREE_INT_ONLY + "RTREE_INT_ONLY", +#endif +#ifdef SQLITE_SECURE_DELETE + "SECURE_DELETE", +#endif +#ifdef SQLITE_SMALL_STACK + "SMALL_STACK", +#endif +#ifdef SQLITE_SORTER_PMASZ + "SORTER_PMASZ=" CTIMEOPT_VAL(SQLITE_SORTER_PMASZ), +#endif +#ifdef SQLITE_SOUNDEX + "SOUNDEX", +#endif +#ifdef SQLITE_STAT4_SAMPLES + "STAT4_SAMPLES=" CTIMEOPT_VAL(SQLITE_STAT4_SAMPLES), +#endif +#ifdef SQLITE_STMTJRNL_SPILL + "STMTJRNL_SPILL=" CTIMEOPT_VAL(SQLITE_STMTJRNL_SPILL), +#endif +#ifdef SQLITE_SUBSTR_COMPATIBILITY + "SUBSTR_COMPATIBILITY", +#endif +#if (!defined(SQLITE_WIN32_MALLOC) \ + && !defined(SQLITE_ZERO_MALLOC) \ + && !defined(SQLITE_MEMDEBUG) \ + ) || defined(SQLITE_SYSTEM_MALLOC) + "SYSTEM_MALLOC", +#endif +#ifdef SQLITE_TCL + "TCL", +#endif +#ifdef SQLITE_TEMP_STORE + "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE), +#endif +#ifdef SQLITE_TEST + "TEST", +#endif +#if defined(SQLITE_THREADSAFE) + "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE), +#elif defined(THREADSAFE) + "THREADSAFE=" CTIMEOPT_VAL(THREADSAFE), +#else + "THREADSAFE=1", +#endif +#ifdef SQLITE_UNLINK_AFTER_CLOSE + "UNLINK_AFTER_CLOSE", +#endif +#ifdef SQLITE_UNTESTABLE + "UNTESTABLE", +#endif +#ifdef SQLITE_USER_AUTHENTICATION + "USER_AUTHENTICATION", +#endif +#ifdef SQLITE_USE_ALLOCA + "USE_ALLOCA", +#endif +#ifdef SQLITE_USE_FCNTL_TRACE + "USE_FCNTL_TRACE", +#endif +#ifdef SQLITE_USE_URI + "USE_URI", +#endif +#ifdef SQLITE_VDBE_COVERAGE + "VDBE_COVERAGE", +#endif +#ifdef SQLITE_WIN32_MALLOC + "WIN32_MALLOC", +#endif +#ifdef SQLITE_ZERO_MALLOC + "ZERO_MALLOC", +#endif + +} ; + +SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){ + *pnOpt = 
sizeof(sqlite3azCompileOpt) / sizeof(sqlite3azCompileOpt[0]); + return (const char**)sqlite3azCompileOpt; +} + +#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ + +/************** End of ctime.c ***********************************************/ /************** Begin file global.c ******************************************/ /* ** 2008 June 13 @@ -21081,6 +21891,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { 0, /* xTestCallback */ #endif 0, /* bLocaltimeFault */ + 0, /* xAltLocaltime */ 0x7ffffffe, /* iOnceResetThreshold */ SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */ 0, /* iPrngSeed */ @@ -21093,6 +21904,18 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { */ SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions; +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) +/* +** Counter used for coverage testing. Does not come into play for +** release builds. +** +** Access to this global variable is not mutex protected. This might +** result in TSAN warnings. But as the variable does not exist in +** release builds, that should not be a concern. +*/ +SQLITE_PRIVATE unsigned int sqlite3CoverageCounter; +#endif /* SQLITE_COVERAGE_TEST || SQLITE_DEBUG */ + #ifdef VDBE_PROFILE /* ** The following performance counter can be used in place of @@ -21143,6 +21966,48 @@ SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER; */ SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY"; +/* +** Standard typenames. These names must match the COLTYPE_* definitions. +** Adjust the SQLITE_N_STDTYPE value if adding or removing entries. +** +** sqlite3StdType[] The actual names of the datatypes. +** +** sqlite3StdTypeLen[] The length (in bytes) of each entry +** in sqlite3StdType[]. +** +** sqlite3StdTypeAffinity[] The affinity associated with each entry +** in sqlite3StdType[]. +** +** sqlite3StdTypeMap[] The type value (as returned from +** sqlite3_column_type() or sqlite3_value_type()) +** for each entry in sqlite3StdType[]. +*/ +SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[] = { 3, 4, 3, 7, 4, 4 }; +SQLITE_PRIVATE const char sqlite3StdTypeAffinity[] = { + SQLITE_AFF_NUMERIC, + SQLITE_AFF_BLOB, + SQLITE_AFF_INTEGER, + SQLITE_AFF_INTEGER, + SQLITE_AFF_REAL, + SQLITE_AFF_TEXT +}; +SQLITE_PRIVATE const char sqlite3StdTypeMap[] = { + 0, + SQLITE_BLOB, + SQLITE_INTEGER, + SQLITE_INTEGER, + SQLITE_FLOAT, + SQLITE_TEXT +}; +SQLITE_PRIVATE const char *sqlite3StdType[] = { + "ANY", + "BLOB", + "INT", + "INTEGER", + "REAL", + "TEXT" +}; + /************** End of global.c **********************************************/ /************** Begin file status.c ******************************************/ /* @@ -21240,7 +22105,7 @@ typedef struct AuxData AuxData; typedef struct VdbeCursor VdbeCursor; struct VdbeCursor { u8 eCurType; /* One of the CURTYPE_* values above */ - i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */ + i8 iDb; /* Index of cursor database in db->aDb[] */ u8 nullRow; /* True if pointing to a row with no data */ u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ u8 isTable; /* True for rowid tables. False for indexes */ @@ -21253,9 +22118,11 @@ struct VdbeCursor { Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ Bool hasBeenDuped:1; /* This cursor was source or target of OP_OpenDup */ u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */ - Btree *pBtx; /* Separate file holding temporary table */ + union { /* pBtx for isEphermeral. 
pAltMap otherwise */ + Btree *pBtx; /* Separate file holding temporary table */ + u32 *aAltMap; /* Mapping from table to index column numbers */ + } ub; i64 seqCount; /* Sequence counter */ - u32 *aAltMap; /* Mapping from table to index column numbers */ /* Cached OP_Column parse information is only valid if cacheStatus matches ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of @@ -21345,8 +22212,8 @@ struct VdbeFrame { int nMem; /* Number of entries in aMem */ int nChildMem; /* Number of memory cells for child frame */ int nChildCsr; /* Number of cursors for child frame */ - int nChange; /* Statement changes (Vdbe.nChange) */ - int nDbChange; /* Value of db->nChange */ + i64 nChange; /* Statement changes (Vdbe.nChange) */ + i64 nDbChange; /* Value of db->nChange */ }; /* Magic number for sanity checking on VdbeFrame objects */ @@ -21553,7 +22420,7 @@ struct Vdbe { u32 cacheCtr; /* VdbeCursor row cache generation counter */ int pc; /* The program counter */ int rc; /* Value to return */ - int nChange; /* Number of db changes made since last reset */ + i64 nChange; /* Number of db changes made since last reset */ int iStatement; /* Statement number (or 0 if has no opened stmt) */ i64 iCurrentTime; /* Value of julianday('now') for this statement */ i64 nFkConstraint; /* Number of imm. FK constraints this VM */ @@ -21595,7 +22462,7 @@ struct Vdbe { bft bIsReader:1; /* True for statements that read */ yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ yDbMask lockMask; /* Subset of btreeMask that requires a lock */ - u32 aCounter[7]; /* Counters used by sqlite3_stmt_status() */ + u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */ char *zSql; /* Text of the SQL statement that generated this */ #ifdef SQLITE_ENABLE_NORMALIZE char *zNormSql; /* Normalization of the associated SQL statement */ @@ -21645,6 +22512,24 @@ struct PreUpdate { Index *pPk; /* PK index if pTab is WITHOUT ROWID */ }; +/* +** An instance of this object is used to pass an vector of values into +** OP_VFilter, the xFilter method of a virtual table. The vector is the +** set of values on the right-hand side of an IN constraint. +** +** The value as passed into xFilter is an sqlite3_value with a "pointer" +** type, such as is generated by sqlite3_result_pointer() and read by +** sqlite3_value_pointer. Such values have MEM_Term|MEM_Subtype|MEM_Null +** and a subtype of 'p'. The sqlite3_vtab_in_first() and _next() interfaces +** know how to use this object to step through all the values in the +** right operand of the IN constraint. 
+*/ +typedef struct ValueList ValueList; +struct ValueList { + BtCursor *pCsr; /* An ephemeral table holding all values */ + sqlite3_value *pOut; /* Register to hold each decoded output value */ +}; + /* ** Function prototypes */ @@ -21657,7 +22542,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*); SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32); SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8); SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32); -SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); +SQLITE_PRIVATE void sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3*, AuxData**, int, int); int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *); @@ -21691,14 +22576,19 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double); SQLITE_PRIVATE void sqlite3VdbeMemSetPointer(Mem*, void*, const char*, void(*)(void*)); SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16); SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*); +#ifndef SQLITE_OMIT_INCRBLOB SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int); +#else +SQLITE_PRIVATE int sqlite3VdbeMemSetZeroBlob(Mem*,int); +#endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*); #endif SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); -SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*); +SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double); +SQLITE_PRIVATE i64 sqlite3VdbeIntValue(const Mem*); SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*); SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*); SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem*, int ifNull); @@ -22676,8 +23566,10 @@ static void clearYMD_HMS_TZ(DateTime *p){ ** is available. This routine returns 0 on success and ** non-zero on any kind of error. ** -** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this -** routine will always fail. +** If the sqlite3GlobalConfig.bLocaltimeFault variable is non-zero then this +** routine will always fail. If bLocaltimeFault is nonzero and +** sqlite3GlobalConfig.xAltLocaltime is not NULL, then xAltLocaltime() is +** invoked in place of the OS-defined localtime() function. ** ** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C ** library function localtime_r() is used to assist in the calculation of @@ -22693,14 +23585,30 @@ static int osLocaltime(time_t *t, struct tm *pTm){ sqlite3_mutex_enter(mutex); pX = localtime(t); #ifndef SQLITE_UNTESTABLE - if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0; + if( sqlite3GlobalConfig.bLocaltimeFault ){ + if( sqlite3GlobalConfig.xAltLocaltime!=0 + && 0==sqlite3GlobalConfig.xAltLocaltime((const void*)t,(void*)pTm) + ){ + pX = pTm; + }else{ + pX = 0; + } + } #endif if( pX ) *pTm = *pX; +#if SQLITE_THREADSAFE>0 sqlite3_mutex_leave(mutex); +#endif rc = pX==0; #else #ifndef SQLITE_UNTESTABLE - if( sqlite3GlobalConfig.bLocaltimeFault ) return 1; + if( sqlite3GlobalConfig.bLocaltimeFault ){ + if( sqlite3GlobalConfig.xAltLocaltime!=0 ){ + return sqlite3GlobalConfig.xAltLocaltime((const void*)t,(void*)pTm); + }else{ + return 1; + } + } #endif #if HAVE_LOCALTIME_R rc = localtime_r(t, pTm)==0; @@ -22715,67 +23623,56 @@ static int osLocaltime(time_t *t, struct tm *pTm){ #ifndef SQLITE_OMIT_LOCALTIME /* -** Compute the difference (in milliseconds) between localtime and UTC -** (a.k.a. GMT) for the time value p where p is in UTC. 
If no error occurs, -** return this value and set *pRc to SQLITE_OK. -** -** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value -** is undefined in this case. +** Assuming the input DateTime is UTC, move it to its localtime equivalent. */ -static sqlite3_int64 localtimeOffset( - DateTime *p, /* Date at which to calculate offset */ - sqlite3_context *pCtx, /* Write error here if one occurs */ - int *pRc /* OUT: Error code. SQLITE_OK or ERROR */ +static int toLocaltime( + DateTime *p, /* Date at which to calculate offset */ + sqlite3_context *pCtx /* Write error here if one occurs */ ){ - DateTime x, y; time_t t; struct tm sLocal; + int iYearDiff; /* Initialize the contents of sLocal to avoid a compiler warning. */ memset(&sLocal, 0, sizeof(sLocal)); - x = *p; - computeYMD_HMS(&x); - if( x.Y<1971 || x.Y>=2038 ){ + computeJD(p); + if( p->iJD<2108667600*(i64)100000 /* 1970-01-01 */ + || p->iJD>2130141456*(i64)100000 /* 2038-01-18 */ + ){ /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only ** works for years between 1970 and 2037. For dates outside this range, ** SQLite attempts to map the year into an equivalent year within this ** range, do the calculation, then map the year back. */ - x.Y = 2000; - x.M = 1; - x.D = 1; - x.h = 0; - x.m = 0; - x.s = 0.0; - } else { - int s = (int)(x.s + 0.5); - x.s = s; + DateTime x = *p; + computeYMD_HMS(&x); + iYearDiff = (2000 + x.Y%4) - x.Y; + x.Y += iYearDiff; + x.validJD = 0; + computeJD(&x); + t = (time_t)(x.iJD/1000 - 21086676*(i64)10000); + }else{ + iYearDiff = 0; + t = (time_t)(p->iJD/1000 - 21086676*(i64)10000); } - x.tz = 0; - x.validJD = 0; - computeJD(&x); - t = (time_t)(x.iJD/1000 - 21086676*(i64)10000); if( osLocaltime(&t, &sLocal) ){ sqlite3_result_error(pCtx, "local time unavailable", -1); - *pRc = SQLITE_ERROR; - return 0; + return SQLITE_ERROR; } - y.Y = sLocal.tm_year + 1900; - y.M = sLocal.tm_mon + 1; - y.D = sLocal.tm_mday; - y.h = sLocal.tm_hour; - y.m = sLocal.tm_min; - y.s = sLocal.tm_sec; - y.validYMD = 1; - y.validHMS = 1; - y.validJD = 0; - y.rawS = 0; - y.validTZ = 0; - y.isError = 0; - computeJD(&y); - *pRc = SQLITE_OK; - return y.iJD - x.iJD; + p->Y = sLocal.tm_year + 1900 - iYearDiff; + p->M = sLocal.tm_mon + 1; + p->D = sLocal.tm_mday; + p->h = sLocal.tm_hour; + p->m = sLocal.tm_min; + p->s = sLocal.tm_sec + (p->iJD%1000)*0.001; + p->validYMD = 1; + p->validHMS = 1; + p->validJD = 0; + p->rawS = 0; + p->validTZ = 0; + p->isError = 0; + return SQLITE_OK; } #endif /* SQLITE_OMIT_LOCALTIME */ @@ -22788,18 +23685,17 @@ static sqlite3_int64 localtimeOffset( ** of several units of time. 
*/ static const struct { - u8 eType; /* Transformation type code */ - u8 nName; /* Length of th name */ - char *zName; /* Name of the transformation */ - double rLimit; /* Maximum NNN value for this transform */ - double rXform; /* Constant used for this transform */ + u8 nName; /* Length of the name */ + char zName[7]; /* Name of the transformation */ + float rLimit; /* Maximum NNN value for this transform */ + float rXform; /* Constant used for this transform */ } aXformType[] = { - { 0, 6, "second", 464269060800.0, 1000.0 }, - { 0, 6, "minute", 7737817680.0, 60000.0 }, - { 0, 4, "hour", 128963628.0, 3600000.0 }, - { 0, 3, "day", 5373485.0, 86400000.0 }, - { 1, 5, "month", 176546.0, 2592000000.0 }, - { 2, 4, "year", 14713.0, 31536000000.0 }, + { 6, "second", 4.6427e+14, 1.0 }, + { 6, "minute", 7.7379e+12, 60.0 }, + { 4, "hour", 1.2897e+11, 3600.0 }, + { 3, "day", 5373485.0, 86400.0 }, + { 5, "month", 176546.0, 2592000.0 }, + { 4, "year", 14713.0, 31536000.0 }, }; /* @@ -22830,11 +23726,55 @@ static int parseModifier( sqlite3_context *pCtx, /* Function context */ const char *z, /* The text of the modifier */ int n, /* Length of zMod in bytes */ - DateTime *p /* The date/time value to be modified */ + DateTime *p, /* The date/time value to be modified */ + int idx /* Parameter index of the modifier */ ){ int rc = 1; double r; switch(sqlite3UpperToLower[(u8)z[0]] ){ + case 'a': { + /* + ** auto + ** + ** If rawS is available, then interpret as a julian day number, or + ** a unix timestamp, depending on its magnitude. + */ + if( sqlite3_stricmp(z, "auto")==0 ){ + if( idx>1 ) return 1; /* IMP: R-33611-57934 */ + if( !p->rawS || p->validJD ){ + rc = 0; + p->rawS = 0; + }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ + && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ + ){ + r = p->s*1000.0 + 210866760000000.0; + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)(r + 0.5); + p->validJD = 1; + p->rawS = 0; + rc = 0; + } + } + break; + } + case 'j': { + /* + ** julianday + ** + ** Always interpret the prior number as a julian-day value. If this + ** is not the first modifier, or if the prior argument is not a numeric + ** value in the allowed range of julian day numbers understood by + ** SQLite (0..5373484.5) then the result will be NULL. + */ + if( sqlite3_stricmp(z, "julianday")==0 ){ + if( idx>1 ) return 1; /* IMP: R-31176-64601 */ + if( p->validJD && p->rawS ){ + rc = 0; + p->rawS = 0; + } + } + break; + } #ifndef SQLITE_OMIT_LOCALTIME case 'l': { /* localtime @@ -22843,9 +23783,7 @@ static int parseModifier( ** show local time. */ if( sqlite3_stricmp(z, "localtime")==0 && sqlite3NotPureFunc(pCtx) ){ - computeJD(p); - p->iJD += localtimeOffset(p, pCtx, &rc); - clearYMD_HMS_TZ(p); + rc = toLocaltime(p, pCtx); } break; } @@ -22858,6 +23796,7 @@ static int parseModifier( ** seconds since 1970. Convert to a real julian day number. 
*/ if( sqlite3_stricmp(z, "unixepoch")==0 && p->rawS ){ + if( idx>1 ) return 1; /* IMP: R-49255-55373 */ r = p->s*1000.0 + 210866760000000.0; if( r>=0.0 && r<464269060800000.0 ){ clearYMD_HMS_TZ(p); @@ -22870,18 +23809,31 @@ static int parseModifier( #ifndef SQLITE_OMIT_LOCALTIME else if( sqlite3_stricmp(z, "utc")==0 && sqlite3NotPureFunc(pCtx) ){ if( p->tzSet==0 ){ - sqlite3_int64 c1; + i64 iOrigJD; /* Original localtime */ + i64 iGuess; /* Guess at the corresponding utc time */ + int cnt = 0; /* Safety to prevent infinite loop */ + int iErr; /* Guess is off by this much */ + computeJD(p); - c1 = localtimeOffset(p, pCtx, &rc); - if( rc==SQLITE_OK ){ - p->iJD -= c1; - clearYMD_HMS_TZ(p); - p->iJD += c1 - localtimeOffset(p, pCtx, &rc); - } + iGuess = iOrigJD = p->iJD; + iErr = 0; + do{ + DateTime new; + memset(&new, 0, sizeof(new)); + iGuess -= iErr; + new.iJD = iGuess; + new.validJD = 1; + rc = toLocaltime(&new, pCtx); + if( rc ) return rc; + computeJD(&new); + iErr = new.iJD - iOrigJD; + }while( iErr && cnt++<3 ); + memset(p, 0, sizeof(*p)); + p->iJD = iGuess; + p->validJD = 1; p->tzSet = 1; - }else{ - rc = SQLITE_OK; } + rc = SQLITE_OK; } #endif break; @@ -22997,9 +23949,10 @@ static int parseModifier( && sqlite3_strnicmp(aXformType[i].zName, z, n)==0 && r>-aXformType[i].rLimit && rM += (int)r; x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; @@ -23009,8 +23962,9 @@ static int parseModifier( r -= (int)r; break; } - case 2: { /* Special processing to add years */ + case 5: { /* Special processing to add years */ int y = (int)r; + assert( strcmp(aXformType[i].zName,"year")==0 ); computeYMD_HMS(p); p->Y += y; p->validJD = 0; @@ -23019,7 +23973,7 @@ static int parseModifier( } } computeJD(p); - p->iJD += (sqlite3_int64)(r*aXformType[i].rXform + rRounder); + p->iJD += (sqlite3_int64)(r*1000.0*aXformType[i].rXform + rRounder); rc = 0; break; } @@ -23069,7 +24023,7 @@ static int isDate( for(i=1; iisError || !validJulianDay(p->iJD) ) return 1; @@ -23099,6 +24053,24 @@ static void juliandayFunc( } } +/* +** unixepoch( TIMESTRING, MOD, MOD, ...) +** +** Return the number of seconds (including fractional seconds) since +** the unix epoch of 1970-01-01 00:00:00 GMT. +*/ +static void unixepochFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + DateTime x; + if( isDate(context, argc, argv, &x)==0 ){ + computeJD(&x); + sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + } +} + /* ** datetime( TIMESTRING, MOD, MOD, ...) 
** @@ -23111,11 +24083,38 @@ static void datetimeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - char zBuf[100]; + int Y, s; + char zBuf[24]; computeYMD_HMS(&x); - sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d %02d:%02d:%02d", - x.Y, x.M, x.D, x.h, x.m, (int)(x.s)); - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); + Y = x.Y; + if( Y<0 ) Y = -Y; + zBuf[1] = '0' + (Y/1000)%10; + zBuf[2] = '0' + (Y/100)%10; + zBuf[3] = '0' + (Y/10)%10; + zBuf[4] = '0' + (Y)%10; + zBuf[5] = '-'; + zBuf[6] = '0' + (x.M/10)%10; + zBuf[7] = '0' + (x.M)%10; + zBuf[8] = '-'; + zBuf[9] = '0' + (x.D/10)%10; + zBuf[10] = '0' + (x.D)%10; + zBuf[11] = ' '; + zBuf[12] = '0' + (x.h/10)%10; + zBuf[13] = '0' + (x.h)%10; + zBuf[14] = ':'; + zBuf[15] = '0' + (x.m/10)%10; + zBuf[16] = '0' + (x.m)%10; + zBuf[17] = ':'; + s = (int)x.s; + zBuf[18] = '0' + (s/10)%10; + zBuf[19] = '0' + (s)%10; + zBuf[20] = 0; + if( x.Y<0 ){ + zBuf[0] = '-'; + sqlite3_result_text(context, zBuf, 20, SQLITE_TRANSIENT); + }else{ + sqlite3_result_text(context, &zBuf[1], 19, SQLITE_TRANSIENT); + } } } @@ -23131,10 +24130,20 @@ static void timeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - char zBuf[100]; + int s; + char zBuf[16]; computeHMS(&x); - sqlite3_snprintf(sizeof(zBuf), zBuf, "%02d:%02d:%02d", x.h, x.m, (int)x.s); - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); + zBuf[0] = '0' + (x.h/10)%10; + zBuf[1] = '0' + (x.h)%10; + zBuf[2] = ':'; + zBuf[3] = '0' + (x.m/10)%10; + zBuf[4] = '0' + (x.m)%10; + zBuf[5] = ':'; + s = (int)x.s; + zBuf[6] = '0' + (s/10)%10; + zBuf[7] = '0' + (s)%10; + zBuf[8] = 0; + sqlite3_result_text(context, zBuf, 8, SQLITE_TRANSIENT); } } @@ -23150,10 +24159,28 @@ static void dateFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - char zBuf[100]; + int Y; + char zBuf[16]; computeYMD(&x); - sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d", x.Y, x.M, x.D); - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); + Y = x.Y; + if( Y<0 ) Y = -Y; + zBuf[1] = '0' + (Y/1000)%10; + zBuf[2] = '0' + (Y/100)%10; + zBuf[3] = '0' + (Y/10)%10; + zBuf[4] = '0' + (Y)%10; + zBuf[5] = '-'; + zBuf[6] = '0' + (x.M/10)%10; + zBuf[7] = '0' + (x.M)%10; + zBuf[8] = '-'; + zBuf[9] = '0' + (x.D/10)%10; + zBuf[10] = '0' + (x.D)%10; + zBuf[11] = 0; + if( x.Y<0 ){ + zBuf[0] = '-'; + sqlite3_result_text(context, zBuf, 11, SQLITE_TRANSIENT); + }else{ + sqlite3_result_text(context, &zBuf[1], 10, SQLITE_TRANSIENT); + } } } @@ -23182,131 +24209,100 @@ static void strftimeFunc( sqlite3_value **argv ){ DateTime x; - u64 n; size_t i,j; - char *z; sqlite3 *db; const char *zFmt; - char zBuf[100]; + sqlite3_str sRes; + + if( argc==0 ) return; zFmt = (const char*)sqlite3_value_text(argv[0]); if( zFmt==0 || isDate(context, argc-1, argv+1, &x) ) return; db = sqlite3_context_db_handle(context); - for(i=0, n=1; zFmt[i]; i++, n++){ - if( zFmt[i]=='%' ){ - switch( zFmt[i+1] ){ - case 'd': - case 'H': - case 'm': - case 'M': - case 'S': - case 'W': - n++; - /* fall thru */ - case 'w': - case '%': - break; - case 'f': - n += 8; - break; - case 'j': - n += 3; - break; - case 'Y': - n += 8; - break; - case 's': - case 'J': - n += 50; - break; - default: - return; /* ERROR. 
return a NULL */ - } - i++; - } - } - testcase( n==sizeof(zBuf)-1 ); - testcase( n==sizeof(zBuf) ); - testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); - testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ); - if( n(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - return; - }else{ - z = sqlite3DbMallocRawNN(db, (int)n); - if( z==0 ){ - sqlite3_result_error_nomem(context); - return; - } - } + sqlite3StrAccumInit(&sRes, 0, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); + computeJD(&x); computeYMD_HMS(&x); for(i=j=0; zFmt[i]; i++){ - if( zFmt[i]!='%' ){ - z[j++] = zFmt[i]; - }else{ - i++; - switch( zFmt[i] ){ - case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break; - case 'f': { - double s = x.s; - if( s>59.999 ) s = 59.999; - sqlite3_snprintf(7, &z[j],"%06.3f", s); - j += sqlite3Strlen30(&z[j]); - break; - } - case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break; - case 'W': /* Fall thru */ - case 'j': { - int nDay; /* Number of days since 1st day of year */ - DateTime y = x; - y.validJD = 0; - y.M = 1; - y.D = 1; - computeJD(&y); - nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( zFmt[i]=='W' ){ - int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = (int)(((x.iJD+43200000)/86400000)%7); - sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); - j += 2; - }else{ - sqlite3_snprintf(4, &z[j],"%03d",nDay+1); - j += 3; - } - break; - } - case 'J': { - sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0); - j+=sqlite3Strlen30(&z[j]); - break; - } - case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; - case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; - case 's': { - i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); - sqlite3Int64ToText(iS, &z[j]); - j += sqlite3Strlen30(&z[j]); - break; - } - case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; - case 'w': { - z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; - break; - } - case 'Y': { - sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]); - break; + if( zFmt[i]!='%' ) continue; + if( j59.999 ) s = 59.999; + sqlite3_str_appendf(&sRes, "%06.3f", s); + break; + } + case 'H': { + sqlite3_str_appendf(&sRes, "%02d", x.h); + break; + } + case 'W': /* Fall thru */ + case 'j': { + int nDay; /* Number of days since 1st day of year */ + DateTime y = x; + y.validJD = 0; + y.M = 1; + y.D = 1; + computeJD(&y); + nDay = (int)((x.iJD-y.iJD+43200000)/86400000); + if( zFmt[i]=='W' ){ + int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ + wd = (int)(((x.iJD+43200000)/86400000)%7); + sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7); + }else{ + sqlite3_str_appendf(&sRes,"%03d",nDay+1); } - default: z[j++] = '%'; break; + break; + } + case 'J': { + sqlite3_str_appendf(&sRes,"%.16g",x.iJD/86400000.0); + break; + } + case 'm': { + sqlite3_str_appendf(&sRes,"%02d",x.M); + break; + } + case 'M': { + sqlite3_str_appendf(&sRes,"%02d",x.m); + break; + } + case 's': { + i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); + sqlite3_str_appendf(&sRes,"%lld",iS); + break; + } + case 'S': { + sqlite3_str_appendf(&sRes,"%02d",(int)x.s); + break; + } + case 'w': { + sqlite3_str_appendchar(&sRes, 1, + (char)(((x.iJD+129600000)/86400000) % 7) + '0'); + break; + } + case 'Y': { + sqlite3_str_appendf(&sRes,"%04d",x.Y); + break; + } + case '%': { + sqlite3_str_appendchar(&sRes, 1, '%'); + break; + } + default: { + sqlite3_str_reset(&sRes); + return; } } } - z[j] = 0; - sqlite3_result_text(context, z, -1, - z==zBuf ? 
SQLITE_TRANSIENT : SQLITE_DYNAMIC); + if( jpMethods==0) ) return 0; return id->pMethods->xDeviceCharacteristics(id); } #ifndef SQLITE_OMIT_WAL @@ -23741,12 +24739,15 @@ SQLITE_PRIVATE int sqlite3OsOpenMalloc( rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); if( rc!=SQLITE_OK ){ sqlite3_free(pFile); + *ppFile = 0; }else{ *ppFile = pFile; } }else{ + *ppFile = 0; rc = SQLITE_NOMEM_BKPT; } + assert( *ppFile!=0 || rc!=SQLITE_OK ); return rc; } SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *pFile){ @@ -24464,7 +25465,7 @@ static void adjustStats(int iSize, int increment){ ** This routine checks the guards at either end of the allocation and ** if they are incorrect it asserts. */ -static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ +static struct MemBlockHdr *sqlite3MemsysGetHeader(const void *pAllocation){ struct MemBlockHdr *p; int *pInt; u8 *pU8; @@ -24711,7 +25712,7 @@ SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){ ** ** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); */ -SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){ +SQLITE_PRIVATE int sqlite3MemdebugHasType(const void *p, u8 eType){ int rc = 1; if( p && sqlite3GlobalConfig.m.xFree==sqlite3MemFree ){ struct MemBlockHdr *pHdr; @@ -24733,7 +25734,7 @@ SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){ ** ** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); */ -SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){ +SQLITE_PRIVATE int sqlite3MemdebugNoType(const void *p, u8 eType){ int rc = 1; if( p && sqlite3GlobalConfig.m.xFree==sqlite3MemFree ){ struct MemBlockHdr *pHdr; @@ -27111,205 +28112,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ /* ** Include code that is common to all os_*.c files */ -/************** Include os_common.h in the middle of mutex_w32.c *************/ -/************** Begin file os_common.h ***************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains macros and a little bit of code that is common to -** all of the platform-specific files (os_*.c) and is #included into those -** files. -** -** This file should be #included by the os_*.c files only. It is not a -** general purpose header file. -*/ -#ifndef _OS_COMMON_H_ -#define _OS_COMMON_H_ - -/* -** At least two bugs have slipped in because we changed the MEMORY_DEBUG -** macro to SQLITE_DEBUG and some older makefiles have not yet made the -** switch. The following code should catch this problem at compile-time. -*/ -#ifdef MEMORY_DEBUG -# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." -#endif - -/* -** Macros for performance tracing. Normally turned off. Only works -** on i486 hardware. -*/ -#ifdef SQLITE_PERFORMANCE_TRACE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - -static sqlite_uint64 g_start; -static sqlite_uint64 g_elapsed; -#define TIMER_START g_start=sqlite3Hwtime() -#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start -#define TIMER_ELAPSED g_elapsed -#else -#define TIMER_START -#define TIMER_END -#define TIMER_ELAPSED ((sqlite_uint64)0) -#endif - -/* -** If we compile with the SQLITE_TEST macro set, then the following block -** of code will give us the ability to simulate a disk I/O error. This -** is used for testing the I/O recovery logic. 
-*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_io_error_hit; -SQLITE_API extern int sqlite3_io_error_hardhit; -SQLITE_API extern int sqlite3_io_error_pending; -SQLITE_API extern int sqlite3_io_error_persist; -SQLITE_API extern int sqlite3_io_error_benign; -SQLITE_API extern int sqlite3_diskfull_pending; -SQLITE_API extern int sqlite3_diskfull; -#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) -#define SimulateIOError(CODE) \ - if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ - || sqlite3_io_error_pending-- == 1 ) \ - { local_ioerr(); CODE; } -static void local_ioerr(){ - IOTRACE(("IOERR\n")); - sqlite3_io_error_hit++; - if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; -} -#define SimulateDiskfullError(CODE) \ - if( sqlite3_diskfull_pending ){ \ - if( sqlite3_diskfull_pending == 1 ){ \ - local_ioerr(); \ - sqlite3_diskfull = 1; \ - sqlite3_io_error_hit = 1; \ - CODE; \ - }else{ \ - sqlite3_diskfull_pending--; \ - } \ - } -#else -#define SimulateIOErrorBenign(X) -#define SimulateIOError(A) -#define SimulateDiskfullError(A) -#endif /* defined(SQLITE_TEST) */ - -/* -** When testing, keep a count of the number of open files. -*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_open_file_count; -#define OpenCounter(X) sqlite3_open_file_count+=(X) -#else -#define OpenCounter(X) -#endif /* defined(SQLITE_TEST) */ - -#endif /* !defined(_OS_COMMON_H_) */ - -/************** End of os_common.h *******************************************/ -/************** Continuing where we left off in mutex_w32.c ******************/ +/* #include "os_common.h" */ /* ** Include the header file for the Windows VFS. @@ -28102,7 +28905,7 @@ SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){ ** TRUE if p is a lookaside memory allocation from db */ #ifndef SQLITE_OMIT_LOOKASIDE -static int isLookaside(sqlite3 *db, void *p){ +static int isLookaside(sqlite3 *db, const void *p){ return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd); } #else @@ -28113,18 +28916,18 @@ static int isLookaside(sqlite3 *db, void *p){ ** Return the size of a memory allocation previously obtained from ** sqlite3Malloc() or sqlite3_malloc(). */ -SQLITE_PRIVATE int sqlite3MallocSize(void *p){ +SQLITE_PRIVATE int sqlite3MallocSize(const void *p){ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); - return sqlite3GlobalConfig.m.xSize(p); + return sqlite3GlobalConfig.m.xSize((void*)p); } -static int lookasideMallocSize(sqlite3 *db, void *p){ +static int lookasideMallocSize(sqlite3 *db, const void *p){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE return plookaside.pMiddle ? db->lookaside.szTrue : LOOKASIDE_SMALL; #else return db->lookaside.szTrue; #endif } -SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){ +SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, const void *p){ assert( p!=0 ); #ifdef SQLITE_DEBUG if( db==0 || !isLookaside(db,p) ){ @@ -28151,7 +28954,7 @@ SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){ } } } - return sqlite3GlobalConfig.m.xSize(p); + return sqlite3GlobalConfig.m.xSize((void*)p); } SQLITE_API sqlite3_uint64 sqlite3_msize(void *p){ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); @@ -28536,8 +29339,9 @@ SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const cha ** Free any prior content in *pz and replace it with a copy of zNew. 
*/ SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){ + char *z = sqlite3DbStrDup(db, zNew); sqlite3DbFree(db, *pz); - *pz = sqlite3DbStrDup(db, zNew); + *pz = z; } /* @@ -28545,8 +29349,15 @@ SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){ ** has happened. This routine will set db->mallocFailed, and also ** temporarily disable the lookaside memory allocator and interrupt ** any running VDBEs. +** +** Always return a NULL pointer so that this routine can be invoked using +** +** return sqlite3OomFault(db); +** +** and thereby avoid unnecessary stack frame allocations for the overwhelmingly +** common case where no OOM occurs. */ -SQLITE_PRIVATE void sqlite3OomFault(sqlite3 *db){ +SQLITE_PRIVATE void *sqlite3OomFault(sqlite3 *db){ if( db->mallocFailed==0 && db->bBenignMalloc==0 ){ db->mallocFailed = 1; if( db->nVdbeExec>0 ){ @@ -28554,9 +29365,11 @@ SQLITE_PRIVATE void sqlite3OomFault(sqlite3 *db){ } DisableLookaside; if( db->pParse ){ + sqlite3ErrorMsg(db->pParse, "out of memory"); db->pParse->rc = SQLITE_NOMEM_BKPT; } } + return 0; } /* @@ -28761,7 +29574,7 @@ static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ /* ** Set the StrAccum object to an error mode. */ -static void setStrAccumError(StrAccum *p, u8 eError){ +SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum *p, u8 eError){ assert( eError==SQLITE_NOMEM || eError==SQLITE_TOOBIG ); p->accError = eError; if( p->mxAlloc ) sqlite3_str_reset(p); @@ -28797,12 +29610,12 @@ static char *printfTempBuf(sqlite3_str *pAccum, sqlite3_int64 n){ char *z; if( pAccum->accError ) return 0; if( n>pAccum->nAlloc && n>pAccum->mxAlloc ){ - setStrAccumError(pAccum, SQLITE_TOOBIG); + sqlite3StrAccumSetError(pAccum, SQLITE_TOOBIG); return 0; } z = sqlite3DbMallocRaw(pAccum->db, n); if( z==0 ){ - setStrAccumError(pAccum, SQLITE_NOMEM); + sqlite3StrAccumSetError(pAccum, SQLITE_NOMEM); } return z; } @@ -29465,12 +30278,22 @@ SQLITE_API void sqlite3_str_vappendf( goto adjust_width_for_utf8; } case etTOKEN: { - Token *pToken; if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return; - pToken = va_arg(ap, Token*); - assert( bArgList==0 ); - if( pToken && pToken->n ){ - sqlite3_str_append(pAccum, (const char*)pToken->z, pToken->n); + if( flag_alternateform ){ + /* %#T means an Expr pointer that uses Expr.u.zToken */ + Expr *pExpr = va_arg(ap,Expr*); + if( ALWAYS(pExpr) && ALWAYS(!ExprHasProperty(pExpr,EP_IntValue)) ){ + sqlite3_str_appendall(pAccum, (const char*)pExpr->u.zToken); + sqlite3RecordErrorOffsetOfExpr(pAccum->db, pExpr); + } + }else{ + /* %T means a Token pointer */ + Token *pToken = va_arg(ap, Token*); + assert( bArgList==0 ); + if( pToken && pToken->n ){ + sqlite3_str_append(pAccum, (const char*)pToken->z, pToken->n); + sqlite3RecordErrorByteOffset(pAccum->db, pToken->z); + } } length = width = 0; break; @@ -29525,6 +30348,42 @@ SQLITE_API void sqlite3_str_vappendf( }/* End for loop over the format string */ } /* End of function */ + +/* +** The z string points to the first character of a token that is +** associated with an error. If db does not already have an error +** byte offset recorded, try to compute the error byte offset for +** z and set the error byte offset in db. 
+*/ +SQLITE_PRIVATE void sqlite3RecordErrorByteOffset(sqlite3 *db, const char *z){ + const Parse *pParse; + const char *zText; + const char *zEnd; + assert( z!=0 ); + if( NEVER(db==0) ) return; + if( db->errByteOffset!=(-2) ) return; + pParse = db->pParse; + if( NEVER(pParse==0) ) return; + zText =pParse->zTail; + if( NEVER(zText==0) ) return; + zEnd = &zText[strlen(zText)]; + if( SQLITE_WITHIN(z,zText,zEnd) ){ + db->errByteOffset = (int)(z-zText); + } +} + +/* +** If pExpr has a byte offset for the start of a token, record that as +** as the error offset. +*/ +SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExpr){ + while( pExpr && (ExprHasProperty(pExpr,EP_FromJoin) || pExpr->w.iOfst<=0) ){ + pExpr = pExpr->pLeft; + } + if( pExpr==0 ) return; + db->errByteOffset = pExpr->w.iOfst; +} + /* ** Enlarge the memory allocation on a StrAccum object so that it is ** able to accept at least N more bytes of text. @@ -29532,7 +30391,7 @@ SQLITE_API void sqlite3_str_vappendf( ** Return the number of bytes of text that StrAccum is able to accept ** after the attempted enlargement. The value returned might be zero. */ -static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ char *zNew; assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */ if( p->accError ){ @@ -29541,7 +30400,7 @@ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ return 0; } if( p->mxAlloc==0 ){ - setStrAccumError(p, SQLITE_TOOBIG); + sqlite3StrAccumSetError(p, SQLITE_TOOBIG); return p->nAlloc - p->nChar - 1; }else{ char *zOld = isMalloced(p) ? p->zText : 0; @@ -29554,7 +30413,7 @@ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ } if( szNew > p->mxAlloc ){ sqlite3_str_reset(p); - setStrAccumError(p, SQLITE_TOOBIG); + sqlite3StrAccumSetError(p, SQLITE_TOOBIG); return 0; }else{ p->nAlloc = (int)szNew; @@ -29572,7 +30431,7 @@ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ p->printfFlags |= SQLITE_PRINTF_MALLOCED; }else{ sqlite3_str_reset(p); - setStrAccumError(p, SQLITE_NOMEM); + sqlite3StrAccumSetError(p, SQLITE_NOMEM); return 0; } } @@ -29645,7 +30504,7 @@ static SQLITE_NOINLINE char *strAccumFinishRealloc(StrAccum *p){ memcpy(zText, p->zText, p->nChar+1); p->printfFlags |= SQLITE_PRINTF_MALLOCED; }else{ - setStrAccumError(p, SQLITE_NOMEM); + sqlite3StrAccumSetError(p, SQLITE_NOMEM); } p->zText = zText; return zText; @@ -29660,6 +30519,22 @@ SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){ return p->zText; } +/* +** Use the content of the StrAccum passed as the second argument +** as the result of an SQL function. +*/ +SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context *pCtx, StrAccum *p){ + if( p->accError ){ + sqlite3_result_error_code(pCtx, p->accError); + sqlite3_str_reset(p); + }else if( isMalloced(p) ){ + sqlite3_result_text(pCtx, p->zText, p->nChar, SQLITE_DYNAMIC); + }else{ + sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC); + sqlite3_str_reset(p); + } +} + /* ** This singleton is an sqlite3_str object that is returned if ** sqlite3_malloc() fails to provide space for a real one. 
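The hunks above thread a byte offset for the offending token through the connection (db->errByteOffset, sqlite3RecordErrorByteOffset(), and the %T / %#T printf cases). A minimal sketch of how an application can observe that offset follows; it assumes the vendored amalgamation is recent enough to export the public accessor sqlite3_error_offset(), and the SQL text is made up purely for illustration.

    /* Sketch: reading the error byte offset recorded by the machinery above.
    ** sqlite3_error_offset() returns -1 when no offset is available. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db = 0;
      sqlite3_stmt *pStmt = 0;
      sqlite3_open(":memory:", &db);
      if( sqlite3_prepare_v2(db, "SELECT no_such_column FROM sqlite_schema",
                             -1, &pStmt, 0)!=SQLITE_OK ){
        printf("%s (byte offset %d)\n", sqlite3_errmsg(db),
               sqlite3_error_offset(db));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }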
This @@ -30080,6 +30955,8 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) } if( pItem->fg.jointype & JT_LEFT ){ sqlite3_str_appendf(&x, " LEFT-JOIN"); + }else if( pItem->fg.jointype & JT_CROSS ){ + sqlite3_str_appendf(&x, " CROSS-JOIN"); } if( pItem->fg.fromDDL ){ sqlite3_str_appendf(&x, " DDL"); @@ -30348,7 +31225,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3_str_appendf(&x, " fg.af=%x.%c", pExpr->flags, pExpr->affExpr ? pExpr->affExpr : 'n'); if( ExprHasProperty(pExpr, EP_FromJoin) ){ - sqlite3_str_appendf(&x, " iRJT=%d", pExpr->iRightJoinTable); + sqlite3_str_appendf(&x, " iRJT=%d", pExpr->w.iRightJoinTable); } if( ExprHasProperty(pExpr, EP_FromDDL) ){ sqlite3_str_appendf(&x, " DDL"); @@ -30378,6 +31255,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewLine(pView, "COLUMN(%d)%s%s", pExpr->iColumn, zFlgs, zOp2); }else{ + assert( ExprUseYTab(pExpr) ); sqlite3TreeViewLine(pView, "{%d:%d} pTab=%p%s", pExpr->iTable, pExpr->iColumn, pExpr->y.pTab, zFlgs); @@ -30397,11 +31275,13 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m } #ifndef SQLITE_OMIT_FLOATING_POINT case TK_FLOAT: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); break; } #endif case TK_STRING: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"%Q", pExpr->u.zToken); break; } @@ -30410,17 +31290,19 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m break; } case TK_TRUEFALSE: { - sqlite3TreeViewLine(pView, - sqlite3ExprTruthValue(pExpr) ? "TRUE" : "FALSE"); + sqlite3TreeViewLine(pView,"%s%s", + sqlite3ExprTruthValue(pExpr) ? "TRUE" : "FALSE", zFlgs); break; } #ifndef SQLITE_OMIT_BLOB_LITERAL case TK_BLOB: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); break; } #endif case TK_VARIABLE: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"VARIABLE(%s,%d)", pExpr->u.zToken, pExpr->iColumn); break; @@ -30430,12 +31312,14 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m break; } case TK_ID: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"ID \"%w\"", pExpr->u.zToken); break; } #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView,"CAST %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; @@ -30485,6 +31369,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m } case TK_SPAN: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView, "SPAN %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; @@ -30496,6 +31381,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m ** up in the treeview output as "SOFT-COLLATE". Explicit COLLATE ** operators that appear in the original SQL always have the ** EP_Collate bit set and appear in treeview output as just "COLLATE" */ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView, "%sCOLLATE %Q%s", !ExprHasProperty(pExpr, EP_Collate) ? 
"SOFT-" : "", pExpr->u.zToken, zFlgs); @@ -30511,6 +31397,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m pFarg = 0; pWin = 0; }else{ + assert( ExprUseXList(pExpr) ); pFarg = pExpr->x.pList; #ifndef SQLITE_OMIT_WINDOWFUNC pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0; @@ -30518,6 +31405,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m pWin = 0; #endif } + assert( !ExprHasProperty(pExpr, EP_IntValue) ); if( pExpr->op==TK_AGG_FUNCTION ){ sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q%s agg=%d[%d]/%p", pExpr->op2, pExpr->u.zToken, zFlgs, @@ -30549,11 +31437,13 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m } #ifndef SQLITE_OMIT_SUBQUERY case TK_EXISTS: { + assert( ExprUseXSelect(pExpr) ); sqlite3TreeViewLine(pView, "EXISTS-expr flags=0x%x", pExpr->flags); sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); break; } case TK_SELECT: { + assert( ExprUseXSelect(pExpr) ); sqlite3TreeViewLine(pView, "subquery-expr flags=0x%x", pExpr->flags); sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); break; @@ -30561,7 +31451,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m case TK_IN: { sqlite3TreeViewLine(pView, "IN flags=0x%x", pExpr->flags); sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); }else{ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); @@ -30582,9 +31472,12 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m ** Z is stored in pExpr->pList->a[1].pExpr. */ case TK_BETWEEN: { - Expr *pX = pExpr->pLeft; - Expr *pY = pExpr->x.pList->a[0].pExpr; - Expr *pZ = pExpr->x.pList->a[1].pExpr; + const Expr *pX, *pY, *pZ; + pX = pExpr->pLeft; + assert( ExprUseXList(pExpr) ); + assert( pExpr->x.pList->nExpr==2 ); + pY = pExpr->x.pList->a[0].pExpr; + pZ = pExpr->x.pList->a[1].pExpr; sqlite3TreeViewLine(pView, "BETWEEN"); sqlite3TreeViewExpr(pView, pX, 1); sqlite3TreeViewExpr(pView, pY, 1); @@ -30606,6 +31499,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m case TK_CASE: { sqlite3TreeViewLine(pView, "CASE"); sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); + assert( ExprUseXList(pExpr) ); sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); break; } @@ -30618,6 +31512,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m case OE_Fail: zType = "fail"; break; case OE_Ignore: zType = "ignore"; break; } + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken); break; } @@ -30630,12 +31525,16 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m } case TK_VECTOR: { char *z = sqlite3_mprintf("VECTOR%s",zFlgs); + assert( ExprUseXList(pExpr) ); sqlite3TreeViewBareExprList(pView, pExpr->x.pList, z); sqlite3_free(z); break; } case TK_SELECT_COLUMN: { - sqlite3TreeViewLine(pView, "SELECT-COLUMN %d", pExpr->iColumn); + sqlite3TreeViewLine(pView, "SELECT-COLUMN %d of [0..%d]%s", + pExpr->iColumn, pExpr->iTable-1, + pExpr->pRight==pExpr->pLeft ? 
" (SELECT-owner)" : ""); + assert( ExprUseXSelect(pExpr->pLeft) ); sqlite3TreeViewSelect(pView, pExpr->pLeft->x.pSelect, 0); break; } @@ -30652,6 +31551,15 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewExpr(pView, &tmp, 0); break; } + case TK_ROW: { + if( pExpr->iColumn<=0 ){ + sqlite3TreeViewLine(pView, "First FROM table rowid"); + }else{ + sqlite3TreeViewLine(pView, "First FROM table column %d", + pExpr->iColumn-1); + } + break; + } default: { sqlite3TreeViewLine(pView, "op=%d", pExpr->op); break; @@ -31703,16 +32611,6 @@ SQLITE_PRIVATE void sqlite3UtfSelfTest(void){ #include #endif -/* -** Routine needed to support the testcase() macro. -*/ -#ifdef SQLITE_COVERAGE_TEST -SQLITE_PRIVATE void sqlite3Coverage(int x){ - static unsigned dummy = 0; - dummy += (unsigned)x; -} -#endif - /* ** Calls to sqlite3FaultSim() are used to simulate a failure during testing, ** or to bypass normal error detection during testing in order to let @@ -31742,11 +32640,21 @@ SQLITE_PRIVATE int sqlite3FaultSim(int iTest){ #ifndef SQLITE_OMIT_FLOATING_POINT /* ** Return true if the floating point value is Not a Number (NaN). +** +** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN. +** Otherwise, we have our own implementation that works on most systems. */ SQLITE_PRIVATE int sqlite3IsNaN(double x){ + int rc; /* The value return */ +#if !SQLITE_HAVE_ISNAN && !HAVE_ISNAN u64 y; memcpy(&y,&x,sizeof(y)); - return IsNaN(y); + rc = IsNaN(y); +#else + rc = isnan(x); +#endif /* HAVE_ISNAN */ + testcase( rc ); + return rc; } #endif /* SQLITE_OMIT_FLOATING_POINT */ @@ -31771,8 +32679,14 @@ SQLITE_PRIVATE int sqlite3Strlen30(const char *z){ ** the column name if and only if the COLFLAG_HASTYPE flag is set. */ SQLITE_PRIVATE char *sqlite3ColumnType(Column *pCol, char *zDflt){ - if( (pCol->colFlags & COLFLAG_HASTYPE)==0 ) return zDflt; - return pCol->zName + strlen(pCol->zName) + 1; + if( pCol->colFlags & COLFLAG_HASTYPE ){ + return pCol->zCnName + strlen(pCol->zCnName) + 1; + }else if( pCol->eCType ){ + assert( pCol->eCType<=SQLITE_N_STDTYPE ); + return (char*)sqlite3StdType[pCol->eCType-1]; + }else{ + return zDflt; + } } /* @@ -31793,7 +32707,11 @@ static SQLITE_NOINLINE void sqlite3ErrorFinish(sqlite3 *db, int err_code){ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ assert( db!=0 ); db->errCode = err_code; - if( err_code || db->pErr ) sqlite3ErrorFinish(db, err_code); + if( err_code || db->pErr ){ + sqlite3ErrorFinish(db, err_code); + }else{ + db->errByteOffset = -1; + } } /* @@ -31803,6 +32721,7 @@ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3 *db){ assert( db!=0 ); db->errCode = SQLITE_OK; + db->errByteOffset = -1; if( db->pErr ) sqlite3ValueSetNull(db->pErr); } @@ -31823,17 +32742,8 @@ SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){ ** handle "db". The error code is set to "err_code". ** ** If it is not NULL, string zFormat specifies the format of the -** error string in the style of the printf functions: The following -** format characters are allowed: -** -** %s Insert a string -** %z A string that should be freed after use -** %d Insert an integer -** %T Insert a token -** %S Insert the first element of a SrcList -** -** zFormat and any string tokens that follow it are assumed to be -** encoded in UTF-8. +** error string. zFormat and any string tokens that follow it are +** assumed to be encoded in UTF-8. 
** ** To clear the most recent error for sqlite handle "db", sqlite3Error ** should be called with err_code set to SQLITE_OK and zFormat set @@ -31857,13 +32767,6 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *z /* ** Add an error message to pParse->zErrMsg and increment pParse->nErr. -** The following formatting characters are allowed: -** -** %s Insert a string -** %z A string that should be freed after use -** %d Insert an integer -** %T Insert a token -** %S Insert the first element of a SrcList ** ** This function should be used to report any error that occurs while ** compiling an SQL statement (i.e. within sqlite3_prepare()). The @@ -31876,11 +32779,19 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ char *zMsg; va_list ap; sqlite3 *db = pParse->db; + assert( db!=0 ); + assert( db->pParse==pParse ); + db->errByteOffset = -2; va_start(ap, zFormat); zMsg = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); + if( db->errByteOffset<-1 ) db->errByteOffset = -1; if( db->suppressErr ){ sqlite3DbFree(db, zMsg); + if( db->mallocFailed ){ + pParse->nErr++; + pParse->rc = SQLITE_NOMEM; + } }else{ pParse->nErr++; sqlite3DbFree(db, pParse->zErrMsg); @@ -31943,11 +32854,34 @@ SQLITE_PRIVATE void sqlite3Dequote(char *z){ z[j] = 0; } SQLITE_PRIVATE void sqlite3DequoteExpr(Expr *p){ + assert( !ExprHasProperty(p, EP_IntValue) ); assert( sqlite3Isquote(p->u.zToken[0]) ); p->flags |= p->u.zToken[0]=='"' ? EP_Quoted|EP_DblQuoted : EP_Quoted; sqlite3Dequote(p->u.zToken); } +/* +** If the input token p is quoted, try to adjust the token to remove +** the quotes. This is not always possible: +** +** "abc" -> abc +** "ab""cd" -> (not possible because of the interior "") +** +** Remove the quotes if possible. This is a optimization. The overall +** system should still return the correct answer even if this routine +** is always a no-op. +*/ +SQLITE_PRIVATE void sqlite3DequoteToken(Token *p){ + unsigned int i; + if( p->n<2 ) return; + if( !sqlite3Isquote(p->z[0]) ) return; + for(i=1; in-1; i++){ + if( sqlite3Isquote(p->z[i]) ) return; + } + p->n -= 2; + p->z++; +} + /* ** Generate a Token object from a string */ @@ -33053,13 +33987,13 @@ static void logBadConnection(const char *zType){ ** used as an argument to sqlite3_errmsg() or sqlite3_close(). */ SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3 *db){ - u32 magic; + u8 eOpenState; if( db==0 ){ logBadConnection("NULL"); return 0; } - magic = db->magic; - if( magic!=SQLITE_MAGIC_OPEN ){ + eOpenState = db->eOpenState; + if( eOpenState!=SQLITE_STATE_OPEN ){ if( sqlite3SafetyCheckSickOrOk(db) ){ testcase( sqlite3GlobalConfig.xLog!=0 ); logBadConnection("unopened"); @@ -33070,11 +34004,11 @@ SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3 *db){ } } SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ - u32 magic; - magic = db->magic; - if( magic!=SQLITE_MAGIC_SICK && - magic!=SQLITE_MAGIC_OPEN && - magic!=SQLITE_MAGIC_BUSY ){ + u8 eOpenState; + eOpenState = db->eOpenState; + if( eOpenState!=SQLITE_STATE_SICK && + eOpenState!=SQLITE_STATE_OPEN && + eOpenState!=SQLITE_STATE_BUSY ){ testcase( sqlite3GlobalConfig.xLog!=0 ); logBadConnection("invalid"); return 0; @@ -33239,7 +34173,6 @@ SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){ return a[x&7] + y - 10; } -#ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Convert a double into a LogEst ** In other words, compute an approximation for 10*log2(x). 
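The LogEst hunks above drop the #ifdef guards around sqlite3LogEstFromDouble() and sqlite3LogEstToInt(). As the surrounding comments say, a LogEst stores roughly 10*log2(x). The snippet below is only a floating-point illustration of that encoding, not SQLite's implementation (the real routines use integer arithmetic and a small lookup table), and the numbers are example values.

    #include <math.h>
    #include <stdio.h>

    /* Rough stand-in for the LogEst encoding: store ~10*log2(x) so that
    ** multiplying two estimated quantities becomes adding their LogEsts. */
    static int toLogEst(double x){ return (int)lround(10.0*log2(x)); }
    static double fromLogEst(int e){ return pow(2.0, e/10.0); }

    int main(void){
      int a = toLogEst(1000.0);   /* ~100, since log2(1000) is ~9.97 */
      int b = toLogEst(50.0);     /* ~56 */
      printf("product estimate: %.0f\n", fromLogEst(a+b));  /* ~50000 */
      return 0;
    }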
@@ -33254,16 +34187,9 @@ SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){ e = (a>>52) - 1022; return e*10; } -#endif /* SQLITE_OMIT_VIRTUALTABLE */ -#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ - defined(SQLITE_ENABLE_STAT4) || \ - defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) /* ** Convert a LogEst into an integer. -** -** Note that this routine is only used when one or more of various -** non-standard compile-time options is enabled. */ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ u64 n; @@ -33271,17 +34197,9 @@ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ x /= 10; if( n>=5 ) n -= 2; else if( n>=1 ) n -= 1; -#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ - defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) if( x>60 ) return (u64)LARGEST_INT64; -#else - /* If only SQLITE_ENABLE_STAT4 is on, then the largest input - ** possible to this routine is 310, resulting in a maximum x of 31 */ - assert( x<=60 ); -#endif return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x); } -#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */ /* ** Add a new name/number pair to a VList. This might require that the @@ -33693,35 +34611,35 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 18 */ "If" OpHelp(""), /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"), /* 20 */ "IfNot" OpHelp(""), - /* 21 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"), - /* 22 */ "SeekLT" OpHelp("key=r[P3@P4]"), - /* 23 */ "SeekLE" OpHelp("key=r[P3@P4]"), - /* 24 */ "SeekGE" OpHelp("key=r[P3@P4]"), - /* 25 */ "SeekGT" OpHelp("key=r[P3@P4]"), - /* 26 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"), - /* 27 */ "IfNoHope" OpHelp("key=r[P3@P4]"), - /* 28 */ "NoConflict" OpHelp("key=r[P3@P4]"), - /* 29 */ "NotFound" OpHelp("key=r[P3@P4]"), - /* 30 */ "Found" OpHelp("key=r[P3@P4]"), - /* 31 */ "SeekRowid" OpHelp("intkey=r[P3]"), - /* 32 */ "NotExists" OpHelp("intkey=r[P3]"), - /* 33 */ "Last" OpHelp(""), - /* 34 */ "IfSmaller" OpHelp(""), - /* 35 */ "SorterSort" OpHelp(""), - /* 36 */ "Sort" OpHelp(""), - /* 37 */ "Rewind" OpHelp(""), - /* 38 */ "IdxLE" OpHelp("key=r[P3@P4]"), - /* 39 */ "IdxGT" OpHelp("key=r[P3@P4]"), - /* 40 */ "IdxLT" OpHelp("key=r[P3@P4]"), - /* 41 */ "IdxGE" OpHelp("key=r[P3@P4]"), - /* 42 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), + /* 21 */ "IsNullOrType" OpHelp("if typeof(r[P1]) IN (P3,5) goto P2"), + /* 22 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"), + /* 23 */ "SeekLT" OpHelp("key=r[P3@P4]"), + /* 24 */ "SeekLE" OpHelp("key=r[P3@P4]"), + /* 25 */ "SeekGE" OpHelp("key=r[P3@P4]"), + /* 26 */ "SeekGT" OpHelp("key=r[P3@P4]"), + /* 27 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"), + /* 28 */ "IfNoHope" OpHelp("key=r[P3@P4]"), + /* 29 */ "NoConflict" OpHelp("key=r[P3@P4]"), + /* 30 */ "NotFound" OpHelp("key=r[P3@P4]"), + /* 31 */ "Found" OpHelp("key=r[P3@P4]"), + /* 32 */ "SeekRowid" OpHelp("intkey=r[P3]"), + /* 33 */ "NotExists" OpHelp("intkey=r[P3]"), + /* 34 */ "Last" OpHelp(""), + /* 35 */ "IfSmaller" OpHelp(""), + /* 36 */ "SorterSort" OpHelp(""), + /* 37 */ "Sort" OpHelp(""), + /* 38 */ "Rewind" OpHelp(""), + /* 39 */ "IdxLE" OpHelp("key=r[P3@P4]"), + /* 40 */ "IdxGT" OpHelp("key=r[P3@P4]"), + /* 41 */ "IdxLT" OpHelp("key=r[P3@P4]"), + /* 42 */ "IdxGE" OpHelp("key=r[P3@P4]"), /* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), /* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), - /* 45 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), - /* 46 */ "Program" OpHelp(""), - /* 47 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - /* 48 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), 
- /* 49 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), + /* 45 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), + /* 46 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), + /* 47 */ "Program" OpHelp(""), + /* 48 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), + /* 49 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), /* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), /* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), /* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"), @@ -33731,49 +34649,49 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 56 */ "Lt" OpHelp("IF r[P3]=r[P1]"), /* 58 */ "ElseEq" OpHelp(""), - /* 59 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), - /* 60 */ "IncrVacuum" OpHelp(""), - /* 61 */ "VNext" OpHelp(""), - /* 62 */ "Init" OpHelp("Start at P2"), - /* 63 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"), - /* 64 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"), - /* 65 */ "Return" OpHelp(""), - /* 66 */ "EndCoroutine" OpHelp(""), - /* 67 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), - /* 68 */ "Halt" OpHelp(""), - /* 69 */ "Integer" OpHelp("r[P2]=P1"), - /* 70 */ "Int64" OpHelp("r[P2]=P4"), - /* 71 */ "String" OpHelp("r[P2]='P4' (len=P1)"), - /* 72 */ "Null" OpHelp("r[P2..P3]=NULL"), - /* 73 */ "SoftNull" OpHelp("r[P1]=NULL"), - /* 74 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 75 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), - /* 76 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), - /* 77 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), - /* 78 */ "SCopy" OpHelp("r[P2]=r[P1]"), - /* 79 */ "IntCopy" OpHelp("r[P2]=r[P1]"), - /* 80 */ "ChngCntRow" OpHelp("output=r[P1]"), - /* 81 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 82 */ "CollSeq" OpHelp(""), - /* 83 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 84 */ "RealAffinity" OpHelp(""), - /* 85 */ "Cast" OpHelp("affinity(r[P1])"), - /* 86 */ "Permutation" OpHelp(""), - /* 87 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 88 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), - /* 89 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"), - /* 90 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), - /* 91 */ "Column" OpHelp("r[P3]=PX"), - /* 92 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 93 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 94 */ "Count" OpHelp("r[P2]=count()"), - /* 95 */ "ReadCookie" OpHelp(""), - /* 96 */ "SetCookie" OpHelp(""), - /* 97 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 98 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 99 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 100 */ "OpenDup" OpHelp(""), - /* 101 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 59 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), + /* 60 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), + /* 61 */ "IncrVacuum" OpHelp(""), + /* 62 */ "VNext" OpHelp(""), + /* 63 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"), + /* 64 */ "Init" OpHelp("Start at P2"), + /* 65 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"), + /* 66 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"), + /* 67 */ "Return" OpHelp(""), + /* 68 */ "EndCoroutine" OpHelp(""), + /* 69 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), + /* 70 */ "Halt" OpHelp(""), + /* 71 */ "Integer" OpHelp("r[P2]=P1"), + /* 72 */ "Int64" OpHelp("r[P2]=P4"), + /* 73 */ "String" OpHelp("r[P2]='P4' (len=P1)"), + /* 74 */ "Null" OpHelp("r[P2..P3]=NULL"), + /* 75 */ "SoftNull" OpHelp("r[P1]=NULL"), + /* 76 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), + /* 77 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 78 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), + /* 79 */ "Copy" 
OpHelp("r[P2@P3+1]=r[P1@P3+1]"), + /* 80 */ "SCopy" OpHelp("r[P2]=r[P1]"), + /* 81 */ "IntCopy" OpHelp("r[P2]=r[P1]"), + /* 82 */ "FkCheck" OpHelp(""), + /* 83 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 84 */ "CollSeq" OpHelp(""), + /* 85 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 86 */ "RealAffinity" OpHelp(""), + /* 87 */ "Cast" OpHelp("affinity(r[P1])"), + /* 88 */ "Permutation" OpHelp(""), + /* 89 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 90 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), + /* 91 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"), + /* 92 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 93 */ "Column" OpHelp("r[P3]=PX"), + /* 94 */ "TypeCheck" OpHelp("typecheck(r[P1@P2])"), + /* 95 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 96 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 97 */ "Count" OpHelp("r[P2]=count()"), + /* 98 */ "ReadCookie" OpHelp(""), + /* 99 */ "SetCookie" OpHelp(""), + /* 100 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 101 */ "OpenRead" OpHelp("root=P2 iDb=P3"), /* 102 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), /* 103 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), /* 104 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 157 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), - /* 158 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 159 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 160 */ "AggValue" OpHelp("r[P3]=value N=P2"), - /* 161 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 162 */ "Expire" OpHelp(""), - /* 163 */ "CursorLock" OpHelp(""), - /* 164 */ "CursorUnlock" OpHelp(""), - /* 165 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 166 */ "VBegin" OpHelp(""), - /* 167 */ "VCreate" OpHelp(""), - /* 168 */ "VDestroy" OpHelp(""), - /* 169 */ "VOpen" OpHelp(""), - /* 170 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 171 */ "VRename" OpHelp(""), - /* 172 */ "Pagecount" OpHelp(""), - /* 173 */ "MaxPgcnt" OpHelp(""), - /* 174 */ "Trace" OpHelp(""), - /* 175 */ "CursorHint" OpHelp(""), - /* 176 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 177 */ "Noop" OpHelp(""), - /* 178 */ "Explain" OpHelp(""), - /* 179 */ "Abortable" OpHelp(""), + /* 112 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 113 */ "OpenDup" OpHelp(""), + /* 114 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), + /* 115 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 116 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 117 */ "String8" OpHelp("r[P2]='P4'"), + /* 118 */ "SorterOpen" OpHelp(""), + /* 119 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 120 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 121 */ "Close" OpHelp(""), + /* 122 */ "ColumnsUsed" OpHelp(""), + /* 123 */ "SeekScan" OpHelp("Scan-ahead up to P1 rows"), + /* 124 */ "SeekHit" OpHelp("set P2<=seekHit<=P3"), + /* 125 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 126 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 127 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 128 */ "RowCell" OpHelp(""), + /* 129 */ "Delete" OpHelp(""), + /* 130 */ "ResetCount" OpHelp(""), + /* 131 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 132 */ "SorterData" OpHelp("r[P2]=data"), + /* 133 */ "RowData" OpHelp("r[P2]=data"), + /* 134 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 135 */ "NullRow" OpHelp(""), + /* 136 */ "SeekEnd" OpHelp(""), + /* 137 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 138 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 139 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 140 */ "DeferredSeek" OpHelp("Move 
P3 to P1.rowid if needed"), + /* 141 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 142 */ "FinishSeek" OpHelp(""), + /* 143 */ "Destroy" OpHelp(""), + /* 144 */ "Clear" OpHelp(""), + /* 145 */ "ResetSorter" OpHelp(""), + /* 146 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), + /* 147 */ "SqlExec" OpHelp(""), + /* 148 */ "ParseSchema" OpHelp(""), + /* 149 */ "LoadAnalysis" OpHelp(""), + /* 150 */ "DropTable" OpHelp(""), + /* 151 */ "DropIndex" OpHelp(""), + /* 152 */ "DropTrigger" OpHelp(""), + /* 153 */ "Real" OpHelp("r[P2]=P4"), + /* 154 */ "IntegrityCk" OpHelp(""), + /* 155 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 156 */ "Param" OpHelp(""), + /* 157 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 158 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 159 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 160 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), + /* 161 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 162 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 163 */ "AggValue" OpHelp("r[P3]=value N=P2"), + /* 164 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 165 */ "Expire" OpHelp(""), + /* 166 */ "CursorLock" OpHelp(""), + /* 167 */ "CursorUnlock" OpHelp(""), + /* 168 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 169 */ "VBegin" OpHelp(""), + /* 170 */ "VCreate" OpHelp(""), + /* 171 */ "VDestroy" OpHelp(""), + /* 172 */ "VOpen" OpHelp(""), + /* 173 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), + /* 174 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 175 */ "VRename" OpHelp(""), + /* 176 */ "Pagecount" OpHelp(""), + /* 177 */ "MaxPgcnt" OpHelp(""), + /* 178 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), + /* 179 */ "Trace" OpHelp(""), + /* 180 */ "CursorHint" OpHelp(""), + /* 181 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 182 */ "Noop" OpHelp(""), + /* 183 */ "Explain" OpHelp(""), + /* 184 */ "Abortable" OpHelp(""), }; return azName[i]; } @@ -34158,205 +35081,7 @@ static pid_t randomnessPid = 0; /* ** Include code that is common to all os_*.c files */ -/************** Include os_common.h in the middle of os_unix.c ***************/ -/************** Begin file os_common.h ***************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains macros and a little bit of code that is common to -** all of the platform-specific files (os_*.c) and is #included into those -** files. -** -** This file should be #included by the os_*.c files only. It is not a -** general purpose header file. -*/ -#ifndef _OS_COMMON_H_ -#define _OS_COMMON_H_ - -/* -** At least two bugs have slipped in because we changed the MEMORY_DEBUG -** macro to SQLITE_DEBUG and some older makefiles have not yet made the -** switch. The following code should catch this problem at compile-time. -*/ -#ifdef MEMORY_DEBUG -# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." -#endif - -/* -** Macros for performance tracing. Normally turned off. Only works -** on i486 hardware. -*/ -#ifdef SQLITE_PERFORMANCE_TRACE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. 
-*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - -static sqlite_uint64 g_start; -static sqlite_uint64 g_elapsed; -#define TIMER_START g_start=sqlite3Hwtime() -#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start -#define TIMER_ELAPSED g_elapsed -#else -#define TIMER_START -#define TIMER_END -#define TIMER_ELAPSED ((sqlite_uint64)0) -#endif - -/* -** If we compile with the SQLITE_TEST macro set, then the following block -** of code will give us the ability to simulate a disk I/O error. This -** is used for testing the I/O recovery logic. 
-*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_io_error_hit; -SQLITE_API extern int sqlite3_io_error_hardhit; -SQLITE_API extern int sqlite3_io_error_pending; -SQLITE_API extern int sqlite3_io_error_persist; -SQLITE_API extern int sqlite3_io_error_benign; -SQLITE_API extern int sqlite3_diskfull_pending; -SQLITE_API extern int sqlite3_diskfull; -#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) -#define SimulateIOError(CODE) \ - if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ - || sqlite3_io_error_pending-- == 1 ) \ - { local_ioerr(); CODE; } -static void local_ioerr(){ - IOTRACE(("IOERR\n")); - sqlite3_io_error_hit++; - if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; -} -#define SimulateDiskfullError(CODE) \ - if( sqlite3_diskfull_pending ){ \ - if( sqlite3_diskfull_pending == 1 ){ \ - local_ioerr(); \ - sqlite3_diskfull = 1; \ - sqlite3_io_error_hit = 1; \ - CODE; \ - }else{ \ - sqlite3_diskfull_pending--; \ - } \ - } -#else -#define SimulateIOErrorBenign(X) -#define SimulateIOError(A) -#define SimulateDiskfullError(A) -#endif /* defined(SQLITE_TEST) */ - -/* -** When testing, keep a count of the number of open files. -*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_open_file_count; -#define OpenCounter(X) sqlite3_open_file_count+=(X) -#else -#define OpenCounter(X) -#endif /* defined(SQLITE_TEST) */ - -#endif /* !defined(_OS_COMMON_H_) */ - -/************** End of os_common.h *******************************************/ -/************** Continuing where we left off in os_unix.c ********************/ +/* #include "os_common.h" */ /* ** Define various macros that are missing from some systems. @@ -38010,7 +38735,9 @@ static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){ /* Forward declaration */ static int unixGetTempname(int nBuf, char *zBuf); -static int unixFcntlExternalReader(unixFile*, int*); +#ifndef SQLITE_OMIT_WAL + static int unixFcntlExternalReader(unixFile*, int*); +#endif /* ** Information and control of an open file handle. @@ -38129,7 +38856,12 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ #endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ case SQLITE_FCNTL_EXTERNAL_READER: { +#ifndef SQLITE_OMIT_WAL return unixFcntlExternalReader((unixFile*)id, (int*)pArg); +#else + *(int*)pArg = 0; + return SQLITE_OK; +#endif } } return SQLITE_NOTFOUND; @@ -38962,11 +39694,17 @@ static int unixShmLock( int flags /* What to do with the lock */ ){ unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */ - unixShm *p = pDbFd->pShm; /* The shared memory being locked */ - unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ + unixShm *p; /* The shared memory being locked */ + unixShmNode *pShmNode; /* The underlying file iNode */ int rc = SQLITE_OK; /* Result code */ u16 mask; /* Mask of locks to take or release */ - int *aLock = pShmNode->aLock; + int *aLock; + + p = pDbFd->pShm; + if( p==0 ) return SQLITE_IOERR_SHMLOCK; + pShmNode = p->pShmNode; + if( NEVER(pShmNode==0) ) return SQLITE_IOERR_SHMLOCK; + aLock = pShmNode->aLock; assert( pShmNode==pDbFd->pInode->pShmNode ); assert( pShmNode->pInode==pDbFd->pInode ); @@ -39850,25 +40588,35 @@ static int fillInUnixFile( return rc; } +/* +** Directories to consider for temp files. +*/ +static const char *azTempDirs[] = { + 0, + 0, + "/var/tmp", + "/usr/tmp", + "/tmp", + "." +}; + +/* +** Initialize first two members of azTempDirs[] array. 
+*/ +static void unixTempFileInit(void){ + azTempDirs[0] = getenv("SQLITE_TMPDIR"); + azTempDirs[1] = getenv("TMPDIR"); +} + /* ** Return the name of a directory in which to put temporary files. ** If no suitable temporary file directory can be found, return NULL. */ static const char *unixTempFileDir(void){ - static const char *azDirs[] = { - 0, - 0, - "/var/tmp", - "/usr/tmp", - "/tmp", - "." - }; unsigned int i = 0; struct stat buf; const char *zDir = sqlite3_temp_directory; - if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); - if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); while(1){ if( zDir!=0 && osStat(zDir, &buf)==0 @@ -39877,8 +40625,8 @@ static const char *unixTempFileDir(void){ ){ return zDir; } - if( i>=sizeof(azDirs)/sizeof(azDirs[0]) ) break; - zDir = azDirs[i++]; + if( i>=sizeof(azTempDirs)/sizeof(azTempDirs[0]) ) break; + zDir = azTempDirs[i++]; } return 0; } @@ -40184,6 +40932,11 @@ static int unixOpen( } memset(p, 0, sizeof(unixFile)); +#ifdef SQLITE_ASSERT_NO_FILES + /* Applications that never read or write a persistent disk files */ + assert( zName==0 ); +#endif + if( eType==SQLITE_OPEN_MAIN_DB ){ UnixUnusedFd *pUnused; pUnused = findReusableFd(zName, flags); @@ -42145,6 +42898,9 @@ SQLITE_API int sqlite3_os_init(void){ assert( UNIX_SHM_DMS==128 ); /* Byte offset of the deadman-switch */ #endif + /* Initialize temp file dir array. */ + unixTempFileInit(); + return SQLITE_OK; } @@ -42184,205 +42940,7 @@ SQLITE_API int sqlite3_os_end(void){ /* ** Include code that is common to all os_*.c files */ -/************** Include os_common.h in the middle of os_win.c ****************/ -/************** Begin file os_common.h ***************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains macros and a little bit of code that is common to -** all of the platform-specific files (os_*.c) and is #included into those -** files. -** -** This file should be #included by the os_*.c files only. It is not a -** general purpose header file. -*/ -#ifndef _OS_COMMON_H_ -#define _OS_COMMON_H_ - -/* -** At least two bugs have slipped in because we changed the MEMORY_DEBUG -** macro to SQLITE_DEBUG and some older makefiles have not yet made the -** switch. The following code should catch this problem at compile-time. -*/ -#ifdef MEMORY_DEBUG -# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." -#endif - -/* -** Macros for performance tracing. Normally turned off. Only works -** on i486 hardware. -*/ -#ifdef SQLITE_PERFORMANCE_TRACE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
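Earlier in this block the temp-directory probing moves out of unixTempFileDir() into a one-time unixTempFileInit() called from sqlite3_os_init(). A short sketch of how a deployment can still steer temp-file placement is below; the scratch path is invented, and the environment-variable route now has to happen before sqlite3_initialize(), since getenv() is only consulted once at startup after this change.

    #include <stdlib.h>
    #include <sqlite3.h>

    /* Sketch: two ways to influence where unixTempFileDir() looks first.
    ** "/fast/scratch" is a placeholder path. */
    void pick_temp_dir(void){
      /* Option 1: the documented global, checked before azTempDirs[]. */
      sqlite3_temp_directory = sqlite3_mprintf("%s", "/fast/scratch");

      /* Option 2: the environment variable read by unixTempFileInit();
      ** with this change it must be set before sqlite3_initialize(). */
      setenv("SQLITE_TMPDIR", "/fast/scratch", 1);
    }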
-** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - -static sqlite_uint64 g_start; -static sqlite_uint64 g_elapsed; -#define TIMER_START g_start=sqlite3Hwtime() -#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start -#define TIMER_ELAPSED g_elapsed -#else -#define TIMER_START -#define TIMER_END -#define TIMER_ELAPSED ((sqlite_uint64)0) -#endif - -/* -** If we compile with the SQLITE_TEST macro set, then the following block -** of code will give us the ability to simulate a disk I/O error. This -** is used for testing the I/O recovery logic. 
-*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_io_error_hit; -SQLITE_API extern int sqlite3_io_error_hardhit; -SQLITE_API extern int sqlite3_io_error_pending; -SQLITE_API extern int sqlite3_io_error_persist; -SQLITE_API extern int sqlite3_io_error_benign; -SQLITE_API extern int sqlite3_diskfull_pending; -SQLITE_API extern int sqlite3_diskfull; -#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) -#define SimulateIOError(CODE) \ - if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ - || sqlite3_io_error_pending-- == 1 ) \ - { local_ioerr(); CODE; } -static void local_ioerr(){ - IOTRACE(("IOERR\n")); - sqlite3_io_error_hit++; - if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; -} -#define SimulateDiskfullError(CODE) \ - if( sqlite3_diskfull_pending ){ \ - if( sqlite3_diskfull_pending == 1 ){ \ - local_ioerr(); \ - sqlite3_diskfull = 1; \ - sqlite3_io_error_hit = 1; \ - CODE; \ - }else{ \ - sqlite3_diskfull_pending--; \ - } \ - } -#else -#define SimulateIOErrorBenign(X) -#define SimulateIOError(A) -#define SimulateDiskfullError(A) -#endif /* defined(SQLITE_TEST) */ - -/* -** When testing, keep a count of the number of open files. -*/ -#if defined(SQLITE_TEST) -SQLITE_API extern int sqlite3_open_file_count; -#define OpenCounter(X) sqlite3_open_file_count+=(X) -#else -#define OpenCounter(X) -#endif /* defined(SQLITE_TEST) */ - -#endif /* !defined(_OS_COMMON_H_) */ - -/************** End of os_common.h *******************************************/ -/************** Continuing where we left off in os_win.c *********************/ +/* #include "os_common.h" */ /* ** Include the header file for the Windows VFS. @@ -46434,10 +46992,14 @@ static int winShmLock( winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ winShm *p = pDbFd->pShm; /* The shared memory being locked */ winShm *pX; /* For looping over all siblings */ - winShmNode *pShmNode = p->pShmNode; + winShmNode *pShmNode; int rc = SQLITE_OK; /* Result code */ u16 mask; /* Mask of locks to take or release */ + if( p==0 ) return SQLITE_IOERR_SHMLOCK; + pShmNode = p->pShmNode; + if( NEVER(pShmNode==0) ) return SQLITE_IOERR_SHMLOCK; + assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); assert( n>=1 ); assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) @@ -48793,7 +49355,7 @@ static int memdbRead( */ static int memdbEnlarge(MemStore *p, sqlite3_int64 newSz){ unsigned char *pNew; - if( (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)==0 || p->nMmap>0 ){ + if( (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)==0 || NEVER(p->nMmap>0) ){ return SQLITE_FULL; } if( newSz>p->szMax ){ @@ -48852,8 +49414,9 @@ static int memdbTruncate(sqlite3_file *pFile, sqlite_int64 size){ MemStore *p = ((MemFile*)pFile)->pStore; int rc = SQLITE_OK; memdbEnter(p); - if( NEVER(size>p->sz) ){ - rc = SQLITE_FULL; + if( size>p->sz ){ + /* This can only happen with a corrupt wal mode db */ + rc = SQLITE_CORRUPT; }else{ p->sz = size; } @@ -48992,7 +49555,7 @@ static int memdbFetch( ){ MemStore *p = ((MemFile*)pFile)->pStore; memdbEnter(p); - if( iOfst+iAmt>p->sz ){ + if( iOfst+iAmt>p->sz || (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)!=0 ){ *pp = 0; }else{ p->nMmap++; @@ -49026,10 +49589,9 @@ static int memdbOpen( MemFile *pFile = (MemFile*)pFd; MemStore *p = 0; int szName; - if( (flags & SQLITE_OPEN_MAIN_DB)==0 ){ - return ORIGVFS(pVfs)->xOpen(ORIGVFS(pVfs), zName, pFd, flags, pOutFlags); - } - memset(pFile, 0, sizeof(*p)); + UNUSED_PARAMETER(pVfs); + + memset(pFile, 0, sizeof(*pFile)); szName = sqlite3Strlen30(zName); if( szName>1 && 
zName[0]=='/' ){ int i; @@ -49088,8 +49650,9 @@ static int memdbOpen( p->szMax = sqlite3GlobalConfig.mxMemdbSize; } pFile->pStore = p; - assert( pOutFlags!=0 ); /* True because flags==SQLITE_OPEN_MAIN_DB */ - *pOutFlags = flags | SQLITE_OPEN_MEMORY; + if( pOutFlags!=0 ){ + *pOutFlags = flags | SQLITE_OPEN_MEMORY; + } pFd->pMethods = &memdb_io_methods; memdbLeave(p); return SQLITE_OK; @@ -49330,7 +49893,8 @@ SQLITE_API int sqlite3_deserialize( sqlite3_mutex_enter(db->mutex); if( zSchema==0 ) zSchema = db->aDb[0].zDbSName; iDb = sqlite3FindDbName(db, zSchema); - if( iDb<0 ){ + testcase( iDb==1 ); + if( iDb<2 && iDb!=0 ){ rc = SQLITE_ERROR; goto end_deserialize; } @@ -49753,7 +50317,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ sqlite3BitvecClear(0, 1, pTmpSpace); /* Run the program */ - pc = 0; + pc = i = 0; while( (op = aOp[pc])!=0 ){ switch( op ){ case 1: @@ -50057,11 +50621,14 @@ static int numberOfCachePages(PCache *p){ ** suggested cache size is set to N. */ return p->szCache; }else{ + i64 n; /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the ** number of cache pages is adjusted to be a number of pages that would ** use approximately abs(N*1024) bytes of memory based on the current ** page size. */ - return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); + n = ((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); + if( n>1000000000 ) n = 1000000000; + return (int)n; } } @@ -50362,7 +50929,8 @@ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ ** make it so. */ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ - assert( p->nRef>0 ); + assert( p->nRef>0 || p->pCache->bPurgeable==0 ); + testcase( p->nRef==0 ); assert( sqlite3PcachePageSanity(p) ); if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/ p->flags &= ~PGHDR_DONT_WRITE; @@ -51516,12 +52084,18 @@ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ */ static void pcache1Cachesize(sqlite3_pcache *p, int nMax){ PCache1 *pCache = (PCache1 *)p; + u32 n; + assert( nMax>=0 ); if( pCache->bPurgeable ){ PGroup *pGroup = pCache->pGroup; pcache1EnterMutex(pGroup); - pGroup->nMaxPage += (nMax - pCache->nMax); + n = (u32)nMax; + if( n > 0x7fff0000 - pGroup->nMaxPage + pCache->nMax ){ + n = 0x7fff0000 - pGroup->nMaxPage + pCache->nMax; + } + pGroup->nMaxPage += (n - pCache->nMax); pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; - pCache->nMax = nMax; + pCache->nMax = n; pCache->n90pct = pCache->nMax*9/10; pcache1EnforceMaxPage(pCache); pcache1LeaveMutex(pGroup); @@ -51537,7 +52111,7 @@ static void pcache1Shrink(sqlite3_pcache *p){ PCache1 *pCache = (PCache1*)p; if( pCache->bPurgeable ){ PGroup *pGroup = pCache->pGroup; - int savedMaxPage; + unsigned int savedMaxPage; pcache1EnterMutex(pGroup); savedMaxPage = pGroup->nMaxPage; pGroup->nMaxPage = 0; @@ -53274,6 +53848,7 @@ struct Pager { u8 noLock; /* Do not lock (except in WAL mode) */ u8 readOnly; /* True for a read-only database */ u8 memDb; /* True to inhibit all file I/O */ + u8 memVfs; /* VFS-implemented memory database */ /************************************************************************** ** The following block contains those class members that change during @@ -53323,8 +53898,8 @@ struct Pager { i16 nReserve; /* Number of unused bytes at end of each page */ u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */ u32 sectorSize; /* Assumed sector size during rollback */ - int pageSize; /* Number of bytes in a page */ Pgno mxPgno; /* Maximum allowed size of the database */ + 
i64 pageSize; /* Number of bytes in a page */ i64 journalSizeLimit; /* Size limit for persistent journal files */ char *zFilename; /* Name of the database file */ char *zJournal; /* Name of the journal file */ @@ -55487,6 +56062,9 @@ static int pager_playback(Pager *pPager, int isHot){ goto end_playback; } pPager->dbSize = mxPg; + if( pPager->mxPgnomxPgno = mxPg; + } } /* Copy original pages out of the journal and back into the @@ -55668,6 +56246,7 @@ static int readDbPage(PgHdr *pPg){ */ static void pager_write_changecounter(PgHdr *pPg){ u32 change_counter; + if( NEVER(pPg==0) ) return; /* Increment the value just read and write it back to byte 24. */ change_counter = sqlite3Get4byte((u8*)pPg->pPager->dbFileVers)+1; @@ -56542,8 +57121,7 @@ static int pager_wait_on_lock(Pager *pPager, int locktype){ ** current database image, in pages, OR ** ** b) if the page content were written at this time, it would not -** be necessary to write the current content out to the sub-journal -** (as determined by function subjRequiresPage()). +** be necessary to write the current content out to the sub-journal. ** ** If the condition asserted by this function were not true, and the ** dirty page were to be discarded from the cache via the pagerStress() @@ -56558,8 +57136,16 @@ static int pager_wait_on_lock(Pager *pPager, int locktype){ */ #if defined(SQLITE_DEBUG) static void assertTruncateConstraintCb(PgHdr *pPg){ + Pager *pPager = pPg->pPager; assert( pPg->flags&PGHDR_DIRTY ); - assert( !subjRequiresPage(pPg) || pPg->pgno<=pPg->pPager->dbSize ); + if( pPg->pgno>pPager->dbSize ){ /* if (a) is false */ + Pgno pgno = pPg->pgno; + int i; + for(i=0; ipPager->nSavepoint; i++){ + PagerSavepoint *p = &pPager->aSavepoint[i]; + assert( p->nOrigpInSavepoint,pgno) ); + } + } } static void assertTruncateConstraint(Pager *pPager){ sqlite3PcacheIterateDirty(pPager->pPCache, assertTruncateConstraintCb); @@ -56581,7 +57167,6 @@ static void assertTruncateConstraint(Pager *pPager){ */ SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ assert( pPager->dbSize>=nPage || CORRUPT_DB ); - testcase( pPager->dbSizeeState>=PAGER_WRITER_CACHEMOD ); pPager->dbSize = nPage; @@ -57502,6 +58087,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( pPager->zWal = 0; } #endif + (void)pPtr; /* Suppress warning about unused pPtr value */ if( nPathname ) sqlite3DbFree(0, zPathname); pPager->pVfs = pVfs; @@ -57514,7 +58100,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); #ifndef SQLITE_OMIT_DESERIALIZE - memJM = (fout&SQLITE_OPEN_MEMORY)!=0; + pPager->memVfs = memJM = (fout&SQLITE_OPEN_MEMORY)!=0; #endif readOnly = (fout&SQLITE_OPEN_READONLY)!=0; @@ -57899,7 +58485,7 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ ** may mean that the pager was in the error-state when this ** function was called and the journal file does not exist. */ - if( !isOpen(pPager->jfd) ){ + if( !isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ sqlite3_vfs * const pVfs = pPager->pVfs; int bExists; /* True if journal file exists */ rc = sqlite3OsAccess( @@ -58301,6 +58887,7 @@ SQLITE_PRIVATE int sqlite3PagerGet( DbPage **ppPage, /* Write a pointer to the page here */ int flags /* PAGER_GET_XXX flags */ ){ + /* printf("PAGE %u\n", pgno); fflush(stdout); */ return pPager->xGet(pPager, pgno, ppPage, flags); } @@ -59381,8 +59968,8 @@ SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){ ** used by the pager and its associated cache. 
*/ SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager *pPager){ - int perPageSize = pPager->pageSize + pPager->nExtra + sizeof(PgHdr) - + 5*sizeof(void*); + int perPageSize = pPager->pageSize + pPager->nExtra + + (int)(sizeof(PgHdr) + 5*sizeof(void*)); return perPageSize*sqlite3PcachePagecount(pPager->pPCache) + sqlite3MallocSize(pPager) + pPager->pageSize; @@ -59451,7 +60038,7 @@ SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, i ** Return true if this is an in-memory or temp-file backed pager. */ SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){ - return pPager->tempFile; + return pPager->tempFile || pPager->memVfs; } /* @@ -59576,14 +60163,14 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ } pPager->nSavepoint = nNew; - /* If this is a release of the outermost savepoint, truncate - ** the sub-journal to zero bytes in size. */ + /* Truncate the sub-journal so that it only includes the parts + ** that are still in use. */ if( op==SAVEPOINT_RELEASE ){ PagerSavepoint *pRel = &pPager->aSavepoint[nNew]; if( pRel->bTruncateOnRelease && isOpen(pPager->sjfd) ){ /* Only truncate if it is an in-memory sub-journal. */ if( sqlite3JournalIsInMemory(pPager->sjfd) ){ - i64 sz = (pPager->pageSize+4)*pRel->iSubRec; + i64 sz = (pPager->pageSize+4)*(i64)pRel->iSubRec; rc = sqlite3OsTruncate(pPager->sjfd, sz); assert( rc==SQLITE_OK ); } @@ -59771,7 +60358,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i pPgOld = sqlite3PagerLookup(pPager, pgno); assert( !pPgOld || pPgOld->nRef==1 || CORRUPT_DB ); if( pPgOld ){ - if( pPgOld->nRef>1 ){ + if( NEVER(pPgOld->nRef>1) ){ sqlite3PagerUnrefNotNull(pPgOld); return SQLITE_CORRUPT_BKPT; } @@ -59906,12 +60493,12 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ u8 eOld = pPager->journalMode; /* Prior journalmode */ /* The eMode parameter is always valid */ - assert( eMode==PAGER_JOURNALMODE_DELETE - || eMode==PAGER_JOURNALMODE_TRUNCATE - || eMode==PAGER_JOURNALMODE_PERSIST - || eMode==PAGER_JOURNALMODE_OFF - || eMode==PAGER_JOURNALMODE_WAL - || eMode==PAGER_JOURNALMODE_MEMORY ); + assert( eMode==PAGER_JOURNALMODE_DELETE /* 0 */ + || eMode==PAGER_JOURNALMODE_PERSIST /* 1 */ + || eMode==PAGER_JOURNALMODE_OFF /* 2 */ + || eMode==PAGER_JOURNALMODE_TRUNCATE /* 3 */ + || eMode==PAGER_JOURNALMODE_MEMORY /* 4 */ + || eMode==PAGER_JOURNALMODE_WAL /* 5 */ ); /* This routine is only called from the OP_JournalMode opcode, and ** the logic there will never allow a temporary file to be changed @@ -59948,7 +60535,6 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ assert( isOpen(pPager->fd) || pPager->exclusiveMode ); if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){ - /* In this case we would like to delete the journal file. If it is ** not possible, then that is not a problem. Deleting the journal file ** here is an optimization only. @@ -60060,6 +60646,18 @@ SQLITE_PRIVATE int sqlite3PagerCheckpoint( int *pnCkpt /* OUT: Final number of checkpointed frames */ ){ int rc = SQLITE_OK; + if( pPager->pWal==0 && pPager->journalMode==PAGER_JOURNALMODE_WAL ){ + /* This only happens when a database file is zero bytes in size opened and + ** then "PRAGMA journal_mode=WAL" is run and then sqlite3_wal_checkpoint() + ** is invoked without any intervening transactions. We need to start + ** a transaction to initialize pWal. 
The PRAGMA table_list statement is + ** used for this since it starts transactions on every database file, + ** including all ATTACHed databases. This seems expensive for a single + ** sqlite3_wal_checkpoint() call, but it happens very rarely. + ** https://sqlite.org/forum/forumpost/fd0f19d229156939 + */ + sqlite3_exec(db, "PRAGMA table_list",0,0,0); + } if( pPager->pWal ){ rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode, (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler), @@ -60517,7 +61115,10 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ ** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and ** HASHTABLE_NPAGE are selected so that together the wal-index header and ** first index block are the same size as all other index blocks in the -** wal-index. +** wal-index. The values are: +** +** HASHTABLE_NPAGE 4096 +** HASHTABLE_NPAGE_ONE 4062 ** ** Each index block contains two sections, a page-mapping that contains the ** database page number associated with each wal frame, and a hash-table @@ -60753,6 +61354,70 @@ struct WalCkptInfo { }; #define READMARK_NOT_USED 0xffffffff +/* +** This is a schematic view of the complete 136-byte header of the +** wal-index file (also known as the -shm file): +** +** +-----------------------------+ +** 0: | iVersion | \ +** +-----------------------------+ | +** 4: | (unused padding) | | +** +-----------------------------+ | +** 8: | iChange | | +** +-------+-------+-------------+ | +** 12: | bInit | bBig | szPage | | +** +-------+-------+-------------+ | +** 16: | mxFrame | | First copy of the +** +-----------------------------+ | WalIndexHdr object +** 20: | nPage | | +** +-----------------------------+ | +** 24: | aFrameCksum | | +** | | | +** +-----------------------------+ | +** 32: | aSalt | | +** | | | +** +-----------------------------+ | +** 40: | aCksum | | +** | | / +** +-----------------------------+ +** 48: | iVersion | \ +** +-----------------------------+ | +** 52: | (unused padding) | | +** +-----------------------------+ | +** 56: | iChange | | +** +-------+-------+-------------+ | +** 60: | bInit | bBig | szPage | | +** +-------+-------+-------------+ | Second copy of the +** 64: | mxFrame | | WalIndexHdr +** +-----------------------------+ | +** 68: | nPage | | +** +-----------------------------+ | +** 72: | aFrameCksum | | +** | | | +** +-----------------------------+ | +** 80: | aSalt | | +** | | | +** +-----------------------------+ | +** 88: | aCksum | | +** | | / +** +-----------------------------+ +** 96: | nBackfill | +** +-----------------------------+ +** 100: | 5 read marks | +** | | +** | | +** | | +** | | +** +-------+-------+------+------+ +** 120: | Write | Ckpt | Rcvr | Rd0 | \ +** +-------+-------+------+------+ ) 8 lock bytes +** | Read1 | Read2 | Rd3 | Rd4 | / +** +-------+-------+------+------+ +** 128: | nBackfillAttempted | +** +-----------------------------+ +** 132: | (unused padding) | +** +-----------------------------+ +*/ /* A block of WALINDEX_LOCK_RESERVED bytes beginning at ** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems @@ -60909,9 +61574,13 @@ struct WalIterator { ** so. It is safe to enlarge the wal-index if pWal->writeLock is true ** or pWal->exclusiveMode==WAL_HEAPMEMORY_MODE. ** -** If this call is successful, *ppPage is set to point to the wal-index -** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, -** then an SQLite error code is returned and *ppPage is set to 0. 
+** Three possible result scenarios: +** +** (1) rc==SQLITE_OK and *ppPage==Requested-Wal-Index-Page +** (2) rc>=SQLITE_ERROR and *ppPage==NULL +** (3) rc==SQLITE_OK and *ppPage==NULL // only if iPage==0 +** +** Scenario (3) can only occur when pWal->writeLock is false and iPage==0 */ static SQLITE_NOINLINE int walIndexPageRealloc( Wal *pWal, /* The WAL context */ @@ -60944,7 +61613,9 @@ static SQLITE_NOINLINE int walIndexPageRealloc( rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] ); - assert( pWal->apWiData[iPage]!=0 || rc!=SQLITE_OK || pWal->writeLock==0 ); + assert( pWal->apWiData[iPage]!=0 + || rc!=SQLITE_OK + || (pWal->writeLock==0 && iPage==0) ); testcase( pWal->apWiData[iPage]==0 && rc==SQLITE_OK ); if( rc==SQLITE_OK ){ if( iPage>0 && sqlite3FaultSim(600) ) rc = SQLITE_NOMEM; @@ -61283,8 +61954,8 @@ struct WalHashLoc { ** slot in the hash table is set to N, it refers to frame number ** (pLoc->iZero+N) in the log. ** -** Finally, set pLoc->aPgno so that pLoc->aPgno[1] is the page number of the -** first frame indexed by the hash table, frame (pLoc->iZero+1). +** Finally, set pLoc->aPgno so that pLoc->aPgno[0] is the page number of the +** first frame indexed by the hash table, frame (pLoc->iZero). */ static int walHashGet( Wal *pWal, /* WAL handle */ @@ -61296,7 +61967,7 @@ static int walHashGet( rc = walIndexPage(pWal, iHash, &pLoc->aPgno); assert( rc==SQLITE_OK || iHash>0 ); - if( rc==SQLITE_OK ){ + if( pLoc->aPgno ){ pLoc->aHash = (volatile ht_slot *)&pLoc->aPgno[HASHTABLE_NPAGE]; if( iHash==0 ){ pLoc->aPgno = &pLoc->aPgno[WALINDEX_HDR_SIZE/sizeof(u32)]; @@ -61304,7 +61975,8 @@ static int walHashGet( }else{ pLoc->iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE; } - pLoc->aPgno = &pLoc->aPgno[-1]; + }else if( NEVER(rc==SQLITE_OK) ){ + rc = SQLITE_ERROR; } return rc; } @@ -61386,8 +62058,9 @@ static void walCleanupHash(Wal *pWal){ /* Zero the entries in the aPgno array that correspond to frames with ** frame numbers greater than pWal->hdr.mxFrame. */ - nByte = (int)((char *)sLoc.aHash - (char *)&sLoc.aPgno[iLimit+1]); - memset((void *)&sLoc.aPgno[iLimit+1], 0, nByte); + nByte = (int)((char *)sLoc.aHash - (char *)&sLoc.aPgno[iLimit]); + assert( nByte>=0 ); + memset((void *)&sLoc.aPgno[iLimit], 0, nByte); #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT /* Verify that the every entry in the mapping region is still reachable @@ -61396,11 +62069,11 @@ static void walCleanupHash(Wal *pWal){ if( iLimit ){ int j; /* Loop counter */ int iKey; /* Hash key */ - for(j=1; j<=iLimit; j++){ + for(j=0; j=0 ); + memset((void*)sLoc.aPgno, 0, nByte); } /* If the entry in aPgno[] is already set, then the previous writer @@ -61443,9 +62116,9 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ ** Remove the remnants of that writers uncommitted transaction from ** the hash-table before writing any new entries. */ - if( sLoc.aPgno[idx] ){ + if( sLoc.aPgno[idx-1] ){ walCleanupHash(pWal); - assert( !sLoc.aPgno[idx] ); + assert( !sLoc.aPgno[idx-1] ); } /* Write the aPgno[] array entry and the hash-table slot. 
*/ @@ -61453,7 +62126,7 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ for(iKey=walHash(iPage); sLoc.aHash[iKey]; iKey=walNextHash(iKey)){ if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT; } - sLoc.aPgno[idx] = iPage; + sLoc.aPgno[idx-1] = iPage; AtomicStore(&sLoc.aHash[iKey], (ht_slot)idx); #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT @@ -61474,19 +62147,18 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ */ if( (idx&0x3ff)==0 ){ int i; /* Loop counter */ - for(i=1; i<=idx; i++){ + for(i=0; iapWiData[iPg] = aPrivate; for(iFrame=iFirst; iFrame<=iLast; iFrame++){ @@ -61766,14 +62439,43 @@ SQLITE_PRIVATE int sqlite3WalOpen( assert( zWalName && zWalName[0] ); assert( pDbFd ); + /* Verify the values of various constants. Any changes to the values + ** of these constants would result in an incompatible on-disk format + ** for the -shm file. Any change that causes one of these asserts to + ** fail is a backward compatibility problem, even if the change otherwise + ** works. + ** + ** This table also serves as a helpful cross-reference when trying to + ** interpret hex dumps of the -shm file. + */ + assert( 48 == sizeof(WalIndexHdr) ); + assert( 40 == sizeof(WalCkptInfo) ); + assert( 120 == WALINDEX_LOCK_OFFSET ); + assert( 136 == WALINDEX_HDR_SIZE ); + assert( 4096 == HASHTABLE_NPAGE ); + assert( 4062 == HASHTABLE_NPAGE_ONE ); + assert( 8192 == HASHTABLE_NSLOT ); + assert( 383 == HASHTABLE_HASH_1 ); + assert( 32768 == WALINDEX_PGSZ ); + assert( 8 == SQLITE_SHM_NLOCK ); + assert( 5 == WAL_NREADER ); + assert( 24 == WAL_FRAME_HDRSIZE ); + assert( 32 == WAL_HDRSIZE ); + assert( 120 == WALINDEX_LOCK_OFFSET + WAL_WRITE_LOCK ); + assert( 121 == WALINDEX_LOCK_OFFSET + WAL_CKPT_LOCK ); + assert( 122 == WALINDEX_LOCK_OFFSET + WAL_RECOVER_LOCK ); + assert( 123 == WALINDEX_LOCK_OFFSET + WAL_READ_LOCK(0) ); + assert( 124 == WALINDEX_LOCK_OFFSET + WAL_READ_LOCK(1) ); + assert( 125 == WALINDEX_LOCK_OFFSET + WAL_READ_LOCK(2) ); + assert( 126 == WALINDEX_LOCK_OFFSET + WAL_READ_LOCK(3) ); + assert( 127 == WALINDEX_LOCK_OFFSET + WAL_READ_LOCK(4) ); + /* In the amalgamation, the os_unix.c and os_win.c source files come before ** this source file. Verify that the #defines of the locking byte offsets ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value. ** For that matter, if the lock offset ever changes from its initial design ** value of 120, we need to know that so there is an assert() to check it. */ - assert( 120==WALINDEX_LOCK_OFFSET ); - assert( 136==WALINDEX_HDR_SIZE ); #ifdef WIN_SHM_BASE assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET ); #endif @@ -62075,7 +62777,6 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ int nEntry; /* Number of entries in this segment */ ht_slot *aIndex; /* Sorted index for this segment */ - sLoc.aPgno++; if( (i+1)==nSegment ){ nEntry = (int)(iLast - sLoc.iZero); }else{ @@ -62856,7 +63557,9 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ } /* Allocate a buffer to read frames into */ - szFrame = pWal->hdr.szPage + WAL_FRAME_HDRSIZE; + assert( (pWal->szPage & (pWal->szPage-1))==0 ); + assert( pWal->szPage>=512 && pWal->szPage<=65536 ); + szFrame = pWal->szPage + WAL_FRAME_HDRSIZE; aFrame = (u8 *)sqlite3_malloc64(szFrame); if( aFrame==0 ){ rc = SQLITE_NOMEM_BKPT; @@ -62870,7 +63573,7 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ ** the caller. 
*/ aSaveCksum[0] = pWal->hdr.aFrameCksum[0]; aSaveCksum[1] = pWal->hdr.aFrameCksum[1]; - for(iOffset=walFrameOffset(pWal->hdr.mxFrame+1, pWal->hdr.szPage); + for(iOffset=walFrameOffset(pWal->hdr.mxFrame+1, pWal->szPage); iOffset+szFrame<=szWal; iOffset+=szFrame ){ @@ -63214,7 +63917,8 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){ rc = walHashGet(pWal, walFramePage(i), &sLoc); if( rc!=SQLITE_OK ) break; - pgno = sLoc.aPgno[i-sLoc.iZero]; + assert( i - sLoc.iZero - 1 >=0 ); + pgno = sLoc.aPgno[i-sLoc.iZero-1]; iDbOff = (i64)(pgno-1) * szPage; if( iDbOff+szPage<=szDb ){ @@ -63447,7 +64151,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( iKey = walHash(pgno); while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){ u32 iFrame = iH + sLoc.iZero; - if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH]==pgno ){ + if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){ assert( iFrame>iRead || CORRUPT_DB ); iRead = iFrame; } @@ -64699,7 +65403,6 @@ typedef struct CellInfo CellInfo; */ struct MemPage { u8 isInit; /* True if previously initialized. MUST BE FIRST! */ - u8 bBusy; /* Prevent endless loops on corrupt database files */ u8 intKey; /* True if table b-trees. False for index b-trees */ u8 intKeyLeaf; /* True if the leaf of an intKey table */ Pgno pgno; /* Page number for this page */ @@ -64721,7 +65424,9 @@ struct MemPage { u8 *apOvfl[4]; /* Pointers to the body of overflow cells */ BtShared *pBt; /* Pointer to BtShared that this page is part of */ u8 *aData; /* Pointer to disk image of the page data */ - u8 *aDataEnd; /* One byte past the end of usable data */ + u8 *aDataEnd; /* One byte past the end of the entire page - not just + ** the usable space, the entire page. Used to prevent + ** corruption-induced of buffer overflow. */ u8 *aCellIdx; /* The cell index area */ u8 *aDataOfst; /* Same as aData for leaves. aData+4 for interior */ DbPage *pDbPage; /* Pager page handle */ @@ -66280,15 +66985,13 @@ static int btreeMoveto( sqlite3VdbeRecordUnpack(pKeyInfo, (int)nKey, pKey, pIdxKey); if( pIdxKey->nField==0 || pIdxKey->nField>pKeyInfo->nAllField ){ rc = SQLITE_CORRUPT_BKPT; - goto moveto_done; + }else{ + rc = sqlite3BtreeIndexMoveto(pCur, pIdxKey, pRes); } + sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey); }else{ pIdxKey = 0; - } - rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); -moveto_done: - if( pIdxKey ){ - sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey); + rc = sqlite3BtreeTableMoveto(pCur, nKey, bias, pRes); } return rc; } @@ -66680,18 +67383,32 @@ static void btreeParseCellPtr( ** ** pIter += getVarint(pIter, (u64*)&pInfo->nKey); ** - ** The code is inlined to avoid a function call. + ** The code is inlined and the loop is unrolled for performance. + ** This routine is a high-runner. 
*/ iKey = *pIter; if( iKey>=0x80 ){ - u8 *pEnd = &pIter[7]; - iKey &= 0x7f; - while(1){ - iKey = (iKey<<7) | (*++pIter & 0x7f); - if( (*pIter)<0x80 ) break; - if( pIter>=pEnd ){ - iKey = (iKey<<8) | *++pIter; - break; + u8 x; + iKey = ((iKey&0x7f)<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x =*++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + if( x>=0x80 ){ + iKey = (iKey<<8) | (*++pIter); + } + } + } + } + } } } } @@ -66701,7 +67418,7 @@ static void btreeParseCellPtr( pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); - testcase( nPayload==pPage->maxLocal+1 ); + testcase( nPayload==(u32)pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. @@ -66738,7 +67455,7 @@ static void btreeParseCellPtrIndex( pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); - testcase( nPayload==pPage->maxLocal+1 ); + testcase( nPayload==(u32)pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. @@ -66801,7 +67518,7 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ while( (*pIter++)&0x80 && pItermaxLocal ); - testcase( nSize==pPage->maxLocal+1 ); + testcase( nSize==(u32)pPage->maxLocal+1 ); if( nSize<=pPage->maxLocal ){ nSize += (u32)(pIter - pCell); if( nSize<4 ) nSize = 4; @@ -66809,7 +67526,7 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ int minLocal = pPage->minLocal; nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); testcase( nSize==pPage->maxLocal ); - testcase( nSize==pPage->maxLocal+1 ); + testcase( nSize==(u32)pPage->maxLocal+1 ); if( nSize>pPage->maxLocal ){ nSize = minLocal; } @@ -66943,7 +67660,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage); memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz)); sz += sz2; - }else if( iFree+sz>usableSize ){ + }else if( NEVER(iFree+sz>usableSize) ){ return SQLITE_CORRUPT_PAGE(pPage); } @@ -67050,6 +67767,8 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ ** fragmented bytes within the page. 
*/ memcpy(&aData[iAddr], &aData[pc], 2); aData[hdr+7] += (u8)x; + testcase( pc+x>maxPC ); + return &aData[pc]; }else if( x+pc > maxPC ){ /* This slot extends off the end of the usable part of the page */ *pRc = SQLITE_CORRUPT_PAGE(pPg); @@ -67137,7 +67856,7 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ int g2; assert( pSpace+nByte<=data+pPage->pBt->usableSize ); *pIdx = g2 = (int)(pSpace-data); - if( NEVER(g2<=gap) ){ + if( g2<=gap ){ return SQLITE_CORRUPT_PAGE(pPage); }else{ return SQLITE_OK; @@ -67223,7 +67942,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ if( iFreeBlk>pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */ return SQLITE_CORRUPT_PAGE(pPage); } - assert( iFreeBlk>iPtr || iFreeBlk==0 ); + assert( iFreeBlk>iPtr || iFreeBlk==0 || CORRUPT_DB ); /* At this point: ** iFreeBlk: First freeblock after iStart, or zero if none @@ -67494,7 +68213,7 @@ static int btreeInitPage(MemPage *pPage){ pPage->nOverflow = 0; pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize; pPage->aCellIdx = data + pPage->childPtrSize + 8; - pPage->aDataEnd = pPage->aData + pBt->usableSize; + pPage->aDataEnd = pPage->aData + pBt->pageSize; pPage->aDataOfst = pPage->aData + pPage->childPtrSize; /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the ** number of cells on the page. */ @@ -67529,7 +68248,7 @@ static void zeroPage(MemPage *pPage, int flags){ u8 hdr = pPage->hdrOffset; u16 first; - assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); + assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno || CORRUPT_DB ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage) == data ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); @@ -67545,7 +68264,7 @@ static void zeroPage(MemPage *pPage, int flags){ pPage->nFree = (u16)(pBt->usableSize - first); decodeFlags(pPage, flags); pPage->cellOffset = first; - pPage->aDataEnd = &data[pBt->usableSize]; + pPage->aDataEnd = &data[pBt->pageSize]; pPage->aCellIdx = &data[first]; pPage->aDataOfst = &data[pPage->childPtrSize]; pPage->nOverflow = 0; @@ -67671,7 +68390,7 @@ static int getAndInitPage( goto getAndInitPage_error2; } } - assert( (*ppPage)->pgno==pgno ); + assert( (*ppPage)->pgno==pgno || CORRUPT_DB ); assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) ); /* If obtaining a child page for a cursor, we must verify that the page is @@ -68148,30 +68867,38 @@ static int removeFromSharingList(BtShared *pBt){ ** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child ** pointer. */ -static void allocateTempSpace(BtShared *pBt){ - if( !pBt->pTmpSpace ){ - pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); - - /* One of the uses of pBt->pTmpSpace is to format cells before - ** inserting them into a leaf page (function fillInCell()). If - ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes - ** by the various routines that manipulate binary cells. Which - ** can mean that fillInCell() only initializes the first 2 or 3 - ** bytes of pTmpSpace, but that the first 4 bytes are copied from - ** it into a database page. This is not actually a problem, but it - ** does cause a valgrind error when the 1 or 2 bytes of unitialized - ** data is passed to system call write(). So to avoid this error, - ** zero the first 4 bytes of temp space here. 
- ** - ** Also: Provide four bytes of initialized space before the - ** beginning of pTmpSpace as an area available to prepend the - ** left-child pointer to the beginning of a cell. - */ - if( pBt->pTmpSpace ){ - memset(pBt->pTmpSpace, 0, 8); - pBt->pTmpSpace += 4; - } +static SQLITE_NOINLINE int allocateTempSpace(BtShared *pBt){ + assert( pBt!=0 ); + assert( pBt->pTmpSpace==0 ); + /* This routine is called only by btreeCursor() when allocating the + ** first write cursor for the BtShared object */ + assert( pBt->pCursor!=0 && (pBt->pCursor->curFlags & BTCF_WriteFlag)!=0 ); + pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); + if( pBt->pTmpSpace==0 ){ + BtCursor *pCur = pBt->pCursor; + pBt->pCursor = pCur->pNext; /* Unlink the cursor */ + memset(pCur, 0, sizeof(*pCur)); + return SQLITE_NOMEM_BKPT; } + + /* One of the uses of pBt->pTmpSpace is to format cells before + ** inserting them into a leaf page (function fillInCell()). If + ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes + ** by the various routines that manipulate binary cells. Which + ** can mean that fillInCell() only initializes the first 2 or 3 + ** bytes of pTmpSpace, but that the first 4 bytes are copied from + ** it into a database page. This is not actually a problem, but it + ** does cause a valgrind error when the 1 or 2 bytes of unitialized + ** data is passed to system call write(). So to avoid this error, + ** zero the first 4 bytes of temp space here. + ** + ** Also: Provide four bytes of initialized space before the + ** beginning of pTmpSpace as an area available to prepend the + ** left-child pointer to the beginning of a cell. + */ + memset(pBt->pTmpSpace, 0, 8); + pBt->pTmpSpace += 4; + return SQLITE_OK; } /* @@ -68550,7 +69277,6 @@ static int lockBtree(BtShared *pBt){ MemPage *pPage1; /* Page 1 of the database file */ u32 nPage; /* Number of pages in the database */ u32 nPageFile = 0; /* Number of pages in the database file */ - u32 nPageHeader; /* Number of pages in the database according to hdr */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( pBt->pPage1==0 ); @@ -68562,7 +69288,7 @@ static int lockBtree(BtShared *pBt){ /* Do some checking to help insure the file we opened really is ** a valid database file. */ - nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); + nPage = get4byte(28+(u8*)pPage1->aData); sqlite3PagerPagecount(pBt->pPager, (int*)&nPageFile); if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ nPage = nPageFile; @@ -68597,7 +69323,7 @@ static int lockBtree(BtShared *pBt){ goto page1_init_failed; } - /* If the write version is set to 2, this database should be accessed + /* If the read version is set to 2, this database should be accessed ** in WAL mode. If the log is not already open, open it now. Then ** return SQLITE_OK and return without populating BtShared.pPage1. ** The caller detects this and calls this function again. This is @@ -68669,9 +69395,13 @@ static int lockBtree(BtShared *pBt){ pageSize-usableSize); return rc; } - if( sqlite3WritableSchema(pBt->db)==0 && nPage>nPageFile ){ - rc = SQLITE_CORRUPT_BKPT; - goto page1_init_failed; + if( nPage>nPageFile ){ + if( sqlite3WritableSchema(pBt->db)==0 ){ + rc = SQLITE_CORRUPT_BKPT; + goto page1_init_failed; + }else{ + nPage = nPageFile; + } } /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to ** be less than 480. 
In other words, if the page size is 512, then the @@ -69395,16 +70125,18 @@ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){ /* ** This routine is called prior to sqlite3PagerCommit when a transaction ** is committed for an auto-vacuum database. -** -** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages -** the database file should be truncated to during the commit process. -** i.e. the database has been reorganized so that only the first *pnTrunc -** pages are in use. */ -static int autoVacuumCommit(BtShared *pBt){ +static int autoVacuumCommit(Btree *p){ int rc = SQLITE_OK; - Pager *pPager = pBt->pPager; - VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); ) + Pager *pPager; + BtShared *pBt; + sqlite3 *db; + VVA_ONLY( int nRef ); + + assert( p!=0 ); + pBt = p->pBt; + pPager = pBt->pPager; + VVA_ONLY( nRef = sqlite3PagerRefcount(pPager); ) assert( sqlite3_mutex_held(pBt->mutex) ); invalidateAllOverflowCache(pBt); @@ -69412,6 +70144,7 @@ static int autoVacuumCommit(BtShared *pBt){ if( !pBt->incrVacuum ){ Pgno nFin; /* Number of pages in database after autovacuuming */ Pgno nFree; /* Number of pages on the freelist initially */ + Pgno nVac; /* Number of pages to vacuum */ Pgno iFree; /* The next page to be freed */ Pgno nOrig; /* Database size before freeing */ @@ -69425,18 +70158,42 @@ static int autoVacuumCommit(BtShared *pBt){ } nFree = get4byte(&pBt->pPage1->aData[36]); - nFin = finalDbSize(pBt, nOrig, nFree); + db = p->db; + if( db->xAutovacPages ){ + int iDb; + for(iDb=0; ALWAYS(iDbnDb); iDb++){ + if( db->aDb[iDb].pBt==p ) break; + } + nVac = db->xAutovacPages( + db->pAutovacPagesArg, + db->aDb[iDb].zDbSName, + nOrig, + nFree, + pBt->pageSize + ); + if( nVac>nFree ){ + nVac = nFree; + } + if( nVac==0 ){ + return SQLITE_OK; + } + }else{ + nVac = nFree; + } + nFin = finalDbSize(pBt, nOrig, nVac); if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT; if( nFinnFin && rc==SQLITE_OK; iFree--){ - rc = incrVacuumStep(pBt, nFin, iFree, 1); + rc = incrVacuumStep(pBt, nFin, iFree, nVac==nFree); } if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - put4byte(&pBt->pPage1->aData[32], 0); - put4byte(&pBt->pPage1->aData[36], 0); + if( nVac==nFree ){ + put4byte(&pBt->pPage1->aData[32], 0); + put4byte(&pBt->pPage1->aData[36], 0); + } put4byte(&pBt->pPage1->aData[28], nFin); pBt->bDoTruncate = 1; pBt->nPage = nFin; @@ -69487,7 +70244,7 @@ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zSuperJrnl){ sqlite3BtreeEnter(p); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - rc = autoVacuumCommit(pBt); + rc = autoVacuumCommit(p); if( rc!=SQLITE_OK ){ sqlite3BtreeLeave(p); return rc; @@ -69674,7 +70431,7 @@ static void btreeSetNPage(BtShared *pBt, MemPage *pPage1){ int nPage = get4byte(&pPage1->aData[28]); testcase( nPage==0 ); if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); - testcase( pBt->nPage!=nPage ); + testcase( pBt->nPage!=(u32)nPage ); pBt->nPage = nPage; } @@ -69886,10 +70643,6 @@ static int btreeCursor( assert( pBt->pPage1 && pBt->pPage1->aData ); assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 ); - if( wrFlag ){ - allocateTempSpace(pBt); - if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM_BKPT; - } if( iTable<=1 ){ if( iTable<1 ){ return SQLITE_CORRUPT_BKPT; @@ -69906,19 +70659,25 @@ static int btreeCursor( pCur->pKeyInfo = pKeyInfo; pCur->pBtree = p; pCur->pBt = pBt; - pCur->curFlags = wrFlag ? BTCF_WriteFlag : 0; - pCur->curPagerFlags = wrFlag ? 
0 : PAGER_GET_READONLY; + pCur->curFlags = 0; /* If there are two or more cursors on the same btree, then all such ** cursors *must* have the BTCF_Multiple flag set. */ for(pX=pBt->pCursor; pX; pX=pX->pNext){ if( pX->pgnoRoot==iTable ){ pX->curFlags |= BTCF_Multiple; - pCur->curFlags |= BTCF_Multiple; + pCur->curFlags = BTCF_Multiple; } } + pCur->eState = CURSOR_INVALID; pCur->pNext = pBt->pCursor; pBt->pCursor = pCur; - pCur->eState = CURSOR_INVALID; + if( wrFlag ){ + pCur->curFlags |= BTCF_WriteFlag; + pCur->curPagerFlags = 0; + if( pBt->pTmpSpace==0 ) return allocateTempSpace(pBt); + }else{ + pCur->curPagerFlags = PAGER_GET_READONLY; + } return SQLITE_OK; } static int btreeCursorWithLock( @@ -70292,7 +71051,9 @@ static int accessPayload( assert( pPage ); assert( eOp==0 || eOp==1 ); assert( pCur->eState==CURSOR_VALID ); - assert( pCur->ixnCell ); + if( pCur->ix>=pPage->nCell ){ + return SQLITE_CORRUPT_PAGE(pPage); + } assert( cursorHoldsMutex(pCur) ); getCellInfo(pCur); @@ -70479,7 +71240,6 @@ SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 && pCur->pPage ); - assert( pCur->ixpPage->nCell ); return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0); } @@ -70541,7 +71301,7 @@ static const void *fetchPayload( assert( pCur->eState==CURSOR_VALID ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( cursorOwnsBtShared(pCur) ); - assert( pCur->ixpPage->nCell ); + assert( pCur->ixpPage->nCell || CORRUPT_DB ); assert( pCur->info.nSize>0 ); assert( pCur->info.pPayload>pCur->pPage->aData || CORRUPT_DB ); assert( pCur->info.pPayloadpPage->aDataEnd ||CORRUPT_DB); @@ -70692,7 +71452,7 @@ static int moveToRoot(BtCursor *pCur){ while( --pCur->iPage ){ releasePageNotNull(pCur->apPage[pCur->iPage]); } - pCur->pPage = pCur->apPage[0]; + pRoot = pCur->pPage = pCur->apPage[0]; goto skip_init; } }else if( pCur->pgnoRoot==0 ){ @@ -70717,7 +71477,7 @@ static int moveToRoot(BtCursor *pCur){ pCur->curIntKey = pCur->pPage->intKey; } pRoot = pCur->pPage; - assert( pRoot->pgno==pCur->pgnoRoot ); + assert( pRoot->pgno==pCur->pgnoRoot || CORRUPT_DB ); /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is @@ -70739,7 +71499,6 @@ static int moveToRoot(BtCursor *pCur){ pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl); - pRoot = pCur->pPage; if( pRoot->nCell>0 ){ pCur->eState = CURSOR_VALID; }else if( !pRoot->leaf ){ @@ -70874,12 +71633,8 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ return rc; } -/* Move the cursor so that it points to an entry near the key -** specified by pIdxKey or intKey. Return a success code. -** -** For INTKEY tables, the intKey parameter is used. pIdxKey -** must be NULL. For index tables, pIdxKey is used and intKey -** is ignored. +/* Move the cursor so that it points to an entry in a table (a.k.a INTKEY) +** table near the key intKey. Return a success code. 
** ** If an exact match is not found, then the cursor is always ** left pointing at a leaf page which would hold the entry if it @@ -70892,39 +71647,32 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ ** *pRes is as follows: ** ** *pRes<0 The cursor is left pointing at an entry that -** is smaller than intKey/pIdxKey or if the table is empty +** is smaller than intKey or if the table is empty ** and the cursor is therefore left point to nothing. ** ** *pRes==0 The cursor is left pointing at an entry that -** exactly matches intKey/pIdxKey. +** exactly matches intKey. ** ** *pRes>0 The cursor is left pointing at an entry that -** is larger than intKey/pIdxKey. -** -** For index tables, the pIdxKey->eqSeen field is set to 1 if there -** exists an entry in the table that exactly matches pIdxKey. +** is larger than intKey. */ -SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( +SQLITE_PRIVATE int sqlite3BtreeTableMoveto( BtCursor *pCur, /* The cursor to be moved */ - UnpackedRecord *pIdxKey, /* Unpacked index key */ i64 intKey, /* The table key */ int biasRight, /* If true, bias the search to the high end */ int *pRes /* Write search results here */ ){ int rc; - RecordCompare xRecordCompare; assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( pRes ); - assert( (pIdxKey==0)==(pCur->pKeyInfo==0) ); - assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) ); + assert( pCur->pKeyInfo==0 ); + assert( pCur->eState!=CURSOR_VALID || pCur->curIntKey!=0 ); /* If the cursor is already positioned at the point we are trying ** to move to, then just return without doing any work */ - if( pIdxKey==0 - && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 - ){ + if( pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 ){ if( pCur->info.nKey==intKey ){ *pRes = 0; return SQLITE_OK; @@ -70946,9 +71694,7 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( if( pCur->info.nKey==intKey ){ return SQLITE_OK; } - }else if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - }else{ + }else if( rc!=SQLITE_DONE ){ return rc; } } @@ -70959,17 +71705,6 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( pCur->pBtree->nSeek++; /* Performance measurement during testing */ #endif - if( pIdxKey ){ - xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); - pIdxKey->errCode = 0; - assert( pIdxKey->default_rc==1 - || pIdxKey->default_rc==0 - || pIdxKey->default_rc==-1 - ); - }else{ - xRecordCompare = 0; /* All keys are integers */ - } - rc = moveToRoot(pCur); if( rc ){ if( rc==SQLITE_EMPTY ){ @@ -70984,7 +71719,8 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( assert( pCur->eState==CURSOR_VALID ); assert( pCur->pPage->nCell > 0 ); assert( pCur->iPage==0 || pCur->apPage[0]->intKey==pCur->curIntKey ); - assert( pCur->curIntKey || pIdxKey ); + assert( pCur->curIntKey ); + for(;;){ int lwr, upr, idx, c; Pgno chldPg; @@ -70998,144 +71734,246 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( ** be the right kind (index or table) of b-tree page. Otherwise ** a moveToChild() or moveToRoot() call would have detected corruption. */ assert( pPage->nCell>0 ); - assert( pPage->intKey==(pIdxKey==0) ); + assert( pPage->intKey ); lwr = 0; upr = pPage->nCell-1; assert( biasRight==0 || biasRight==1 ); idx = upr>>(1-biasRight); /* idx = biasRight ? 
upr : (lwr+upr)/2; */ - pCur->ix = (u16)idx; - if( xRecordCompare==0 ){ - for(;;){ - i64 nCellKey; - pCell = findCellPastPtr(pPage, idx); - if( pPage->intKeyLeaf ){ - while( 0x80 <= *(pCell++) ){ - if( pCell>=pPage->aDataEnd ){ - return SQLITE_CORRUPT_PAGE(pPage); - } + for(;;){ + i64 nCellKey; + pCell = findCellPastPtr(pPage, idx); + if( pPage->intKeyLeaf ){ + while( 0x80 <= *(pCell++) ){ + if( pCell>=pPage->aDataEnd ){ + return SQLITE_CORRUPT_PAGE(pPage); } } - getVarint(pCell, (u64*)&nCellKey); - if( nCellKeyupr ){ c = -1; break; } - }else if( nCellKey>intKey ){ - upr = idx-1; - if( lwr>upr ){ c = +1; break; } + } + getVarint(pCell, (u64*)&nCellKey); + if( nCellKeyupr ){ c = -1; break; } + }else if( nCellKey>intKey ){ + upr = idx-1; + if( lwr>upr ){ c = +1; break; } + }else{ + assert( nCellKey==intKey ); + pCur->ix = (u16)idx; + if( !pPage->leaf ){ + lwr = idx; + goto moveto_table_next_layer; }else{ - assert( nCellKey==intKey ); - pCur->ix = (u16)idx; - if( !pPage->leaf ){ - lwr = idx; - goto moveto_next_layer; - }else{ - pCur->curFlags |= BTCF_ValidNKey; - pCur->info.nKey = nCellKey; - pCur->info.nSize = 0; - *pRes = 0; - return SQLITE_OK; - } + pCur->curFlags |= BTCF_ValidNKey; + pCur->info.nKey = nCellKey; + pCur->info.nSize = 0; + *pRes = 0; + return SQLITE_OK; } - assert( lwr+upr>=0 ); - idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */ } + assert( lwr+upr>=0 ); + idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */ + } + assert( lwr==upr+1 || !pPage->leaf ); + assert( pPage->isInit ); + if( pPage->leaf ){ + assert( pCur->ixpPage->nCell ); + pCur->ix = (u16)idx; + *pRes = c; + rc = SQLITE_OK; + goto moveto_table_finish; + } +moveto_table_next_layer: + if( lwr>=pPage->nCell ){ + chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); }else{ - for(;;){ - int nCell; /* Size of the pCell cell in bytes */ - pCell = findCellPastPtr(pPage, idx); - - /* The maximum supported page-size is 65536 bytes. This means that - ** the maximum number of record bytes stored on an index B-Tree - ** page is less than 16384 bytes and may be stored as a 2-byte - ** varint. This information is used to attempt to avoid parsing - ** the entire cell by checking for the cases where the record is - ** stored entirely within the b-tree page by inspecting the first - ** 2 bytes of the cell. - */ - nCell = pCell[0]; - if( nCell<=pPage->max1bytePayload ){ - /* This branch runs if the record-size field of the cell is a - ** single byte varint and the record fits entirely on the main - ** b-tree page. */ - testcase( pCell+nCell+1==pPage->aDataEnd ); - c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey); - }else if( !(pCell[1] & 0x80) - && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal - ){ - /* The record-size field is a 2 byte varint and the record - ** fits entirely on the main b-tree page. */ - testcase( pCell+nCell+2==pPage->aDataEnd ); - c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey); - }else{ - /* The record flows over onto one or more overflow pages. In - ** this case the whole cell needs to be parsed, a buffer allocated - ** and accessPayload() used to retrieve the record into the - ** buffer before VdbeRecordCompare() can be called. - ** - ** If the record is corrupt, the xRecordCompare routine may read - ** up to two varints past the end of the buffer. An extra 18 - ** bytes of padding is allocated at the end of the buffer in - ** case this happens. 
*/ - void *pCellKey; - u8 * const pCellBody = pCell - pPage->childPtrSize; - const int nOverrun = 18; /* Size of the overrun padding */ - pPage->xParseCell(pPage, pCellBody, &pCur->info); - nCell = (int)pCur->info.nKey; - testcase( nCell<0 ); /* True if key size is 2^32 or more */ - testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */ - testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */ - testcase( nCell==2 ); /* Minimum legal index key size */ - if( nCell<2 || nCell/pCur->pBt->usableSize>pCur->pBt->nPage ){ - rc = SQLITE_CORRUPT_PAGE(pPage); - goto moveto_finish; - } - pCellKey = sqlite3Malloc( nCell+nOverrun ); - if( pCellKey==0 ){ - rc = SQLITE_NOMEM_BKPT; - goto moveto_finish; - } - pCur->ix = (u16)idx; - rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0); - memset(((u8*)pCellKey)+nCell,0,nOverrun); /* Fix uninit warnings */ - pCur->curFlags &= ~BTCF_ValidOvfl; - if( rc ){ - sqlite3_free(pCellKey); - goto moveto_finish; - } - c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey); - sqlite3_free(pCellKey); + chldPg = get4byte(findCell(pPage, lwr)); + } + pCur->ix = (u16)lwr; + rc = moveToChild(pCur, chldPg); + if( rc ) break; + } +moveto_table_finish: + pCur->info.nSize = 0; + assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); + return rc; +} + +/* Move the cursor so that it points to an entry in an index table +** near the key pIdxKey. Return a success code. +** +** If an exact match is not found, then the cursor is always +** left pointing at a leaf page which would hold the entry if it +** were present. The cursor might point to an entry that comes +** before or after the key. +** +** An integer is written into *pRes which is the result of +** comparing the key with the entry to which the cursor is +** pointing. The meaning of the integer written into +** *pRes is as follows: +** +** *pRes<0 The cursor is left pointing at an entry that +** is smaller than pIdxKey or if the table is empty +** and the cursor is therefore left point to nothing. +** +** *pRes==0 The cursor is left pointing at an entry that +** exactly matches pIdxKey. +** +** *pRes>0 The cursor is left pointing at an entry that +** is larger than pIdxKey. +** +** The pIdxKey->eqSeen field is set to 1 if there +** exists an entry in the table that exactly matches pIdxKey. 
+*/ +SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( + BtCursor *pCur, /* The cursor to be moved */ + UnpackedRecord *pIdxKey, /* Unpacked index key */ + int *pRes /* Write search results here */ +){ + int rc; + RecordCompare xRecordCompare; + + assert( cursorOwnsBtShared(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + assert( pRes ); + assert( pCur->pKeyInfo!=0 ); + +#ifdef SQLITE_DEBUG + pCur->pBtree->nSeek++; /* Performance measurement during testing */ +#endif + + xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); + pIdxKey->errCode = 0; + assert( pIdxKey->default_rc==1 + || pIdxKey->default_rc==0 + || pIdxKey->default_rc==-1 + ); + + rc = moveToRoot(pCur); + if( rc ){ + if( rc==SQLITE_EMPTY ){ + assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); + *pRes = -1; + return SQLITE_OK; + } + return rc; + } + assert( pCur->pPage ); + assert( pCur->pPage->isInit ); + assert( pCur->eState==CURSOR_VALID ); + assert( pCur->pPage->nCell > 0 ); + assert( pCur->iPage==0 || pCur->apPage[0]->intKey==pCur->curIntKey ); + assert( pCur->curIntKey || pIdxKey ); + for(;;){ + int lwr, upr, idx, c; + Pgno chldPg; + MemPage *pPage = pCur->pPage; + u8 *pCell; /* Pointer to current cell in pPage */ + + /* pPage->nCell must be greater than zero. If this is the root-page + ** the cursor would have been INVALID above and this for(;;) loop + ** not run. If this is not the root-page, then the moveToChild() routine + ** would have already detected db corruption. Similarly, pPage must + ** be the right kind (index or table) of b-tree page. Otherwise + ** a moveToChild() or moveToRoot() call would have detected corruption. */ + assert( pPage->nCell>0 ); + assert( pPage->intKey==(pIdxKey==0) ); + lwr = 0; + upr = pPage->nCell-1; + idx = upr>>1; /* idx = (lwr+upr)/2; */ + for(;;){ + int nCell; /* Size of the pCell cell in bytes */ + pCell = findCellPastPtr(pPage, idx); + + /* The maximum supported page-size is 65536 bytes. This means that + ** the maximum number of record bytes stored on an index B-Tree + ** page is less than 16384 bytes and may be stored as a 2-byte + ** varint. This information is used to attempt to avoid parsing + ** the entire cell by checking for the cases where the record is + ** stored entirely within the b-tree page by inspecting the first + ** 2 bytes of the cell. + */ + nCell = pCell[0]; + if( nCell<=pPage->max1bytePayload ){ + /* This branch runs if the record-size field of the cell is a + ** single byte varint and the record fits entirely on the main + ** b-tree page. */ + testcase( pCell+nCell+1==pPage->aDataEnd ); + c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey); + }else if( !(pCell[1] & 0x80) + && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal + ){ + /* The record-size field is a 2 byte varint and the record + ** fits entirely on the main b-tree page. */ + testcase( pCell+nCell+2==pPage->aDataEnd ); + c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey); + }else{ + /* The record flows over onto one or more overflow pages. In + ** this case the whole cell needs to be parsed, a buffer allocated + ** and accessPayload() used to retrieve the record into the + ** buffer before VdbeRecordCompare() can be called. + ** + ** If the record is corrupt, the xRecordCompare routine may read + ** up to two varints past the end of the buffer. An extra 18 + ** bytes of padding is allocated at the end of the buffer in + ** case this happens. 
*/ + void *pCellKey; + u8 * const pCellBody = pCell - pPage->childPtrSize; + const int nOverrun = 18; /* Size of the overrun padding */ + pPage->xParseCell(pPage, pCellBody, &pCur->info); + nCell = (int)pCur->info.nKey; + testcase( nCell<0 ); /* True if key size is 2^32 or more */ + testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */ + testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */ + testcase( nCell==2 ); /* Minimum legal index key size */ + if( nCell<2 || nCell/pCur->pBt->usableSize>pCur->pBt->nPage ){ + rc = SQLITE_CORRUPT_PAGE(pPage); + goto moveto_index_finish; + } + pCellKey = sqlite3Malloc( nCell+nOverrun ); + if( pCellKey==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto moveto_index_finish; } - assert( - (pIdxKey->errCode!=SQLITE_CORRUPT || c==0) - && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed) - ); - if( c<0 ){ - lwr = idx+1; - }else if( c>0 ){ - upr = idx-1; - }else{ - assert( c==0 ); - *pRes = 0; - rc = SQLITE_OK; - pCur->ix = (u16)idx; - if( pIdxKey->errCode ) rc = SQLITE_CORRUPT_BKPT; - goto moveto_finish; + pCur->ix = (u16)idx; + rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0); + memset(((u8*)pCellKey)+nCell,0,nOverrun); /* Fix uninit warnings */ + pCur->curFlags &= ~BTCF_ValidOvfl; + if( rc ){ + sqlite3_free(pCellKey); + goto moveto_index_finish; } - if( lwr>upr ) break; - assert( lwr+upr>=0 ); - idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */ + c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey); + sqlite3_free(pCellKey); } + assert( + (pIdxKey->errCode!=SQLITE_CORRUPT || c==0) + && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed) + ); + if( c<0 ){ + lwr = idx+1; + }else if( c>0 ){ + upr = idx-1; + }else{ + assert( c==0 ); + *pRes = 0; + rc = SQLITE_OK; + pCur->ix = (u16)idx; + if( pIdxKey->errCode ) rc = SQLITE_CORRUPT_BKPT; + goto moveto_index_finish; + } + if( lwr>upr ) break; + assert( lwr+upr>=0 ); + idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */ } assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) ); assert( pPage->isInit ); if( pPage->leaf ){ - assert( pCur->ixpPage->nCell ); + assert( pCur->ixpPage->nCell || CORRUPT_DB ); pCur->ix = (u16)idx; *pRes = c; rc = SQLITE_OK; - goto moveto_finish; + goto moveto_index_finish; } -moveto_next_layer: if( lwr>=pPage->nCell ){ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); }else{ @@ -71145,7 +71983,7 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( rc = moveToChild(pCur, chldPg); if( rc ) break; } -moveto_finish: +moveto_index_finish: pCur->info.nSize = 0; assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); return rc; @@ -71246,16 +72084,6 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){ return SQLITE_CORRUPT_BKPT; } - /* If the database file is corrupt, it is possible for the value of idx - ** to be invalid here. This can only occur if a second cursor modifies - ** the page while cursor pCur is holding a reference to it. Which can - ** only happen if the database is corrupt in such a way as to link the - ** page into more than one b-tree structure. - ** - ** Update 2019-12-23: appears to long longer be possible after the - ** addition of anotherValidCursor() condition on balance_deeper(). */ - harmless( idx>pPage->nCell ); - if( idx>=pPage->nCell ){ if( !pPage->leaf ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); @@ -72167,16 +72995,24 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ int hdr; /* Beginning of the header. 0 most pages. 
100 page 1 */ if( *pRC ) return; - assert( idx>=0 && idxnCell ); + assert( idx>=0 ); + assert( idxnCell ); assert( CORRUPT_DB || sz==cellSize(pPage, idx) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->nFree>=0 ); data = pPage->aData; ptr = &pPage->aCellIdx[2*idx]; + assert( pPage->pBt->usableSize > (u32)(ptr-data) ); pc = get2byte(ptr); hdr = pPage->hdrOffset; - testcase( pc==get2byte(&data[hdr+5]) ); +#if 0 /* Not required. Omit for efficiency */ + if( pcnCell*2 ){ + *pRC = SQLITE_CORRUPT_BKPT; + return; + } +#endif + testcase( pc==(u32)get2byte(&data[hdr+5]) ); testcase( pc+sz==pPage->pBt->usableSize ); if( pc+sz > pPage->pBt->usableSize ){ *pRC = SQLITE_CORRUPT_BKPT; @@ -72468,7 +73304,7 @@ static int rebuildPage( assert( i(u32)usableSize) ){ j = 0; } + if( j>(u32)usableSize ){ j = 0; } memcpy(&pTmp[j], &aData[j], usableSize - j); for(k=0; pCArray->ixNx[k]<=i && ALWAYS(kpPg->aDataEnd ) goto editpage_fail; /* Add cells to the start of the page */ if( iNewpBt; assert( sqlite3_mutex_held(pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); @@ -73129,6 +73965,7 @@ static int balance_nonroot( goto balance_cleanup; } } + nMaxCells += apOld[i]->nCell + ArraySize(pParent->apOvfl); if( (i--)==0 ) break; if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){ @@ -73170,7 +74007,6 @@ static int balance_nonroot( /* Make nMaxCells a multiple of 4 in order to preserve 8-byte ** alignment */ - nMaxCells = nOld*(MX_CELL(pBt) + ArraySize(pParent->apOvfl)); nMaxCells = (nMaxCells + 3)&~3; /* @@ -73453,7 +74289,9 @@ static int balance_nonroot( apOld[i] = 0; rc = sqlite3PagerWrite(pNew->pDbPage); nNew++; - if( sqlite3PagerPageRefcount(pNew->pDbPage)!=1+(i==(iParentIdx-nxDiv)) ){ + if( sqlite3PagerPageRefcount(pNew->pDbPage)!=1+(i==(iParentIdx-nxDiv)) + && rc==SQLITE_OK + ){ rc = SQLITE_CORRUPT_BKPT; } if( rc ) goto balance_cleanup; @@ -73654,7 +74492,7 @@ static int balance_nonroot( iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); - for(k=0; b.ixNx[k]<=i && ALWAYS(kpDbPage)!=1 ){ + if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){ rc = SQLITE_CORRUPT_BKPT; }else{ if( iOffset+ovflPageSize<(u32)nTotal ){ @@ -74170,24 +75008,6 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND|BTREE_PREFORMAT))==flags ); assert( (flags & BTREE_PREFORMAT)==0 || seekResult || pCur->pKeyInfo==0 ); - if( pCur->eState==CURSOR_FAULT ){ - assert( pCur->skipNext!=SQLITE_OK ); - return pCur->skipNext; - } - - assert( cursorOwnsBtShared(pCur) ); - assert( (pCur->curFlags & BTCF_WriteFlag)!=0 - && pBt->inTransaction==TRANS_WRITE - && (pBt->btsFlags & BTS_READ_ONLY)==0 ); - assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); - - /* Assert that the caller has been consistent. If this cursor was opened - ** expecting an index b-tree, then the caller should be inserting blob - ** keys with no associated data. If the cursor was opened expecting an - ** intkey table, the caller should be inserting integer keys with a - ** blob of associated data. */ - assert( (flags & BTREE_PREFORMAT) || (pX->pKey==0)==(pCur->pKeyInfo==0) ); - /* Save the positions of any other cursors open on this table. ** ** In some cases, the call to btreeMoveto() below is a no-op. 
For @@ -74212,6 +75032,24 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( } } + if( pCur->eState>=CURSOR_REQUIRESEEK ){ + rc = moveToRoot(pCur); + if( rc && rc!=SQLITE_EMPTY ) return rc; + } + + assert( cursorOwnsBtShared(pCur) ); + assert( (pCur->curFlags & BTCF_WriteFlag)!=0 + && pBt->inTransaction==TRANS_WRITE + && (pBt->btsFlags & BTS_READ_ONLY)==0 ); + assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); + + /* Assert that the caller has been consistent. If this cursor was opened + ** expecting an index b-tree, then the caller should be inserting blob + ** keys with no associated data. If the cursor was opened expecting an + ** intkey table, the caller should be inserting integer keys with a + ** blob of associated data. */ + assert( (flags & BTREE_PREFORMAT) || (pX->pKey==0)==(pCur->pKeyInfo==0) ); + if( pCur->pKeyInfo==0 ){ assert( pX->pKey==0 ); /* If this is an insert into a table b-tree, invalidate any incrblob @@ -74251,7 +75089,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** to an adjacent cell. Move the cursor so that it is pointing either ** to the cell to be overwritten or an adjacent cell. */ - rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc); + rc = sqlite3BtreeTableMoveto(pCur, pX->nKey, + (flags & BTREE_APPEND)!=0, &loc); if( rc ) return rc; } }else{ @@ -74274,13 +75113,11 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( r.aMem = pX->aMem; r.nField = pX->nMem; r.default_rc = 0; - r.errCode = 0; - r.r1 = 0; - r.r2 = 0; r.eqSeen = 0; - rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc); + rc = sqlite3BtreeIndexMoveto(pCur, &r, &loc); }else{ - rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc); + rc = btreeMoveto(pCur, pX->pKey, pX->nKey, + (flags & BTREE_APPEND)!=0, &loc); } if( rc ) return rc; } @@ -74301,14 +75138,13 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( } } assert( pCur->eState==CURSOR_VALID - || (pCur->eState==CURSOR_INVALID && loc) - || CORRUPT_DB ); + || (pCur->eState==CURSOR_INVALID && loc) ); pPage = pCur->pPage; assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) ); assert( pPage->leaf || !pPage->intKey ); if( pPage->nFree<0 ){ - if( NEVER(pCur->eState>CURSOR_INVALID) ){ + if( pCur->eState>CURSOR_INVALID ){ rc = SQLITE_CORRUPT_BKPT; }else{ rc = btreeComputeFreeSpace(pPage); @@ -74343,7 +75179,10 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( idx = pCur->ix; if( loc==0 ){ CellInfo info; - assert( idxnCell ); + assert( idx>=0 ); + if( idx>=pPage->nCell ){ + return SQLITE_CORRUPT_BKPT; + } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ goto end_insert; @@ -74525,7 +75364,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 } }while( rc==SQLITE_OK && nOut>0 ); - if( rc==SQLITE_OK && nRem>0 ){ + if( rc==SQLITE_OK && nRem>0 && ALWAYS(pPgnoOut) ){ Pgno pgnoNew; MemPage *pNew = 0; rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); @@ -74571,14 +75410,13 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ Btree *p = pCur->pBtree; BtShared *pBt = p->pBt; - int rc; /* Return code */ - MemPage *pPage; /* Page to delete cell from */ - unsigned char *pCell; /* Pointer to cell to delete */ - int iCellIdx; /* Index of cell to delete */ - int iCellDepth; /* Depth of node containing pCell */ - CellInfo info; /* Size of the cell being deleted */ - int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */ - u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */ + int rc; /* Return code */ 
+ MemPage *pPage; /* Page to delete cell from */ + unsigned char *pCell; /* Pointer to cell to delete */ + int iCellIdx; /* Index of cell to delete */ + int iCellDepth; /* Depth of node containing pCell */ + CellInfo info; /* Size of the cell being deleted */ + u8 bPreserve; /* Keep cursor valid. 2 for CURSOR_SKIPNEXT */ assert( cursorOwnsBtShared(pCur) ); assert( pBt->inTransaction==TRANS_WRITE ); @@ -74587,28 +75425,45 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); assert( !hasReadConflicts(p, pCur->pgnoRoot) ); assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 ); - if( pCur->eState==CURSOR_REQUIRESEEK ){ - rc = btreeRestoreCursorPosition(pCur); - assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID ); - if( rc || pCur->eState!=CURSOR_VALID ) return rc; + if( pCur->eState!=CURSOR_VALID ){ + if( pCur->eState>=CURSOR_REQUIRESEEK ){ + rc = btreeRestoreCursorPosition(pCur); + assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID ); + if( rc || pCur->eState!=CURSOR_VALID ) return rc; + }else{ + return SQLITE_CORRUPT_BKPT; + } } - assert( CORRUPT_DB || pCur->eState==CURSOR_VALID ); + assert( pCur->eState==CURSOR_VALID ); iCellDepth = pCur->iPage; iCellIdx = pCur->ix; pPage = pCur->pPage; + if( pPage->nCell<=iCellIdx ){ + return SQLITE_CORRUPT_BKPT; + } pCell = findCell(pPage, iCellIdx); - if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ) return SQLITE_CORRUPT; + if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ + return SQLITE_CORRUPT_BKPT; + } - /* If the bPreserve flag is set to true, then the cursor position must + /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must ** be preserved following this delete operation. If the current delete ** will cause a b-tree rebalance, then this is done by saving the cursor ** key and leaving the cursor in CURSOR_REQUIRESEEK state before ** returning. ** - ** Or, if the current delete will not cause a rebalance, then the cursor + ** If the current delete will not cause a rebalance, then the cursor ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately - ** before or after the deleted entry. In this case set bSkipnext to true. */ + ** before or after the deleted entry. + ** + ** The bPreserve value records which path is required: + ** + ** bPreserve==0 Not necessary to save the cursor position + ** bPreserve==1 Use CURSOR_REQUIRESEEK to save the cursor position + ** bPreserve==2 Cursor won't move. Set CURSOR_SKIPNEXT. 
+ */ + bPreserve = (flags & BTREE_SAVEPOSITION)!=0; if( bPreserve ){ if( !pPage->leaf || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3) @@ -74619,7 +75474,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ rc = saveCursorKey(pCur); if( rc ) return rc; }else{ - bSkipnext = 1; + bPreserve = 2; } } @@ -74719,8 +75574,8 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ } if( rc==SQLITE_OK ){ - if( bSkipnext ){ - assert( bPreserve && (pCur->iPage==iCellDepth || CORRUPT_DB) ); + if( bPreserve>1 ){ + assert( (pCur->iPage==iCellDepth || CORRUPT_DB) ); assert( pPage==pCur->pPage || CORRUPT_DB ); assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell ); pCur->eState = CURSOR_SKIPNEXT; @@ -74914,7 +75769,7 @@ static int clearDatabasePage( BtShared *pBt, /* The BTree that contains the table */ Pgno pgno, /* Page number to clear */ int freePageFlag, /* Deallocate page if true */ - int *pnChange /* Add number of Cells freed to this counter */ + i64 *pnChange /* Add number of Cells freed to this counter */ ){ MemPage *pPage; int rc; @@ -74929,11 +75784,12 @@ static int clearDatabasePage( } rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); if( rc ) return rc; - if( pPage->bBusy ){ + if( (pBt->openFlags & BTREE_SINGLE)==0 + && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1)) + ){ rc = SQLITE_CORRUPT_BKPT; goto cleardatabasepage_out; } - pPage->bBusy = 1; hdr = pPage->hdrOffset; for(i=0; inCell; i++){ pCell = findCell(pPage, i); @@ -74947,6 +75803,7 @@ static int clearDatabasePage( if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; + if( pPage->intKey ) pnChange = 0; } if( pnChange ){ testcase( !pPage->intKey ); @@ -74959,7 +75816,6 @@ static int clearDatabasePage( } cleardatabasepage_out: - pPage->bBusy = 0; releasePage(pPage); return rc; } @@ -74976,7 +75832,7 @@ static int clearDatabasePage( ** If pnChange is not NULL, then the integer value pointed to by pnChange ** is incremented by the number of entries in the table. 
*/ -SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ +SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, i64 *pnChange){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); @@ -75038,10 +75894,10 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ return SQLITE_CORRUPT_BKPT; } - rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0); - if( rc ) return rc; rc = sqlite3BtreeClearTable(p, iTable, 0); - if( rc ){ + if( rc ) return rc; + rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0); + if( NEVER(rc) ){ releasePage(pPage); return rc; } @@ -76309,14 +77165,13 @@ static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){ if( i==1 ){ Parse sParse; int rc = 0; - memset(&sParse, 0, sizeof(sParse)); - sParse.db = pDb; + sqlite3ParseObjectInit(&sParse,pDb); if( sqlite3OpenTempDatabase(&sParse) ){ sqlite3ErrorWithMsg(pErrorDb, sParse.rc, "%s", sParse.zErrMsg); rc = SQLITE_ERROR; } sqlite3DbFree(pErrorDb, sParse.zErrMsg); - sqlite3ParserReset(&sParse); + sqlite3ParseObjectReset(&sParse); if( rc ){ return 0; } @@ -77198,10 +78053,15 @@ SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){ #ifndef SQLITE_OMIT_UTF16 int rc; #endif + assert( pMem!=0 ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE || desiredEnc==SQLITE_UTF16BE ); - if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){ + if( !(pMem->flags&MEM_Str) ){ + pMem->enc = desiredEnc; + return SQLITE_OK; + } + if( pMem->enc==desiredEnc ){ return SQLITE_OK; } assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); @@ -77330,6 +78190,7 @@ static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){ ** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails. */ SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){ @@ -77354,6 +78215,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){ #ifndef SQLITE_OMIT_INCRBLOB SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){ int nByte; + assert( pMem!=0 ); assert( pMem->flags & MEM_Zero ); assert( (pMem->flags&MEM_Blob)!=0 || MemNullNochng(pMem) ); testcase( sqlite3_value_nochange(pMem) ); @@ -77369,6 +78231,8 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){ if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){ return SQLITE_NOMEM_BKPT; } + assert( pMem->z!=0 ); + assert( sqlite3DbMallocSize(pMem->db,pMem->z) >= nByte ); memset(&pMem->z[pMem->n], 0, pMem->u.nZero); pMem->n += pMem->u.nZero; @@ -77381,6 +78245,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){ ** Make sure the given Mem is \u0000 terminated. 
*/ SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) ); testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 ); @@ -77408,6 +78273,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){ const int nByte = 32; + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !(pMem->flags&MEM_Zero) ); assert( !(pMem->flags&(MEM_Str|MEM_Blob)) ); @@ -77443,6 +78309,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){ sqlite3_context ctx; Mem t; assert( pFunc!=0 ); + assert( pMem!=0 ); assert( pFunc->xFinalize!=0 ); assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); @@ -77586,13 +78453,14 @@ static SQLITE_NOINLINE i64 doubleToInt64(double r){ ** ** If pMem represents a string value, its encoding might be changed. */ -static SQLITE_NOINLINE i64 memIntValue(Mem *pMem){ +static SQLITE_NOINLINE i64 memIntValue(const Mem *pMem){ i64 value = 0; sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc); return value; } -SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){ +SQLITE_PRIVATE i64 sqlite3VdbeIntValue(const Mem *pMem){ int flags; + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); flags = pMem->flags; @@ -77621,6 +78489,7 @@ static SQLITE_NOINLINE double memRealValue(Mem *pMem){ return val; } SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); if( pMem->flags & MEM_Real ){ @@ -77653,6 +78522,7 @@ SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){ */ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ i64 ix; + assert( pMem!=0 ); assert( pMem->flags & MEM_Real ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); @@ -77680,6 +78550,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ ** Convert pMem to type integer. Invalidate any prior representations. */ SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); @@ -77694,6 +78565,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){ ** Invalidate any prior representations. */ SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); @@ -77727,6 +78599,7 @@ SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){ ** as much of the string as we can and ignore the rest. */ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){ + assert( pMem!=0 ); testcase( pMem->flags & MEM_Int ); testcase( pMem->flags & MEM_Real ); testcase( pMem->flags & MEM_IntReal ); @@ -77836,6 +78709,7 @@ SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){ ** Delete any previous value and set the value to be a BLOB of length ** n containing all zeros. 
*/ +#ifndef SQLITE_OMIT_INCRBLOB SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ sqlite3VdbeMemRelease(pMem); pMem->flags = MEM_Blob|MEM_Zero; @@ -77845,6 +78719,21 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ pMem->enc = SQLITE_UTF8; pMem->z = 0; } +#else +SQLITE_PRIVATE int sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ + int nByte = n>0?n:1; + if( sqlite3VdbeMemGrow(pMem, nByte, 0) ){ + return SQLITE_NOMEM_BKPT; + } + assert( pMem->z!=0 ); + assert( sqlite3DbMallocSize(pMem->db, pMem->z)>=nByte ); + memset(pMem->z, 0, nByte); + pMem->n = n>0?n:0; + pMem->flags = MEM_Blob; + pMem->enc = SQLITE_UTF8; + return SQLITE_OK; +} +#endif /* ** The pMem is known to contain content that needs to be destroyed prior @@ -77884,6 +78773,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetPointer( void (*xDestructor)(void*) ){ assert( pMem->flags==MEM_Null ); + vdbeMemClear(pMem); pMem->u.zPType = zPType ? zPType : ""; pMem->z = pPtr; pMem->flags = MEM_Null|MEM_Dyn|MEM_Subtype|MEM_Term; @@ -78078,6 +78968,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( int iLimit; /* Maximum allowed string or blob size */ u16 flags = 0; /* New value for pMem->flags */ + assert( pMem!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); @@ -78386,7 +79277,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){ #ifdef SQLITE_ENABLE_STAT4 static int valueFromFunction( sqlite3 *db, /* The database connection */ - Expr *p, /* The expression to evaluate */ + const Expr *p, /* The expression to evaluate */ u8 enc, /* Encoding to use */ u8 aff, /* Affinity to use */ sqlite3_value **ppVal, /* Write the new value here */ @@ -78403,8 +79294,10 @@ static int valueFromFunction( assert( pCtx!=0 ); assert( (p->flags & EP_TokenOnly)==0 ); + assert( ExprUseXList(p) ); pList = p->x.pList; if( pList ) nVal = pList->nExpr; + assert( !ExprHasProperty(p, EP_IntValue) ); pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0); assert( pFunc ); if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 @@ -78480,7 +79373,7 @@ static int valueFromFunction( */ static int valueFromExpr( sqlite3 *db, /* The database connection */ - Expr *pExpr, /* The expression to evaluate */ + const Expr *pExpr, /* The expression to evaluate */ u8 enc, /* Encoding to use */ u8 affinity, /* Affinity to use */ sqlite3_value **ppVal, /* Write the new value here */ @@ -78495,11 +79388,7 @@ static int valueFromExpr( assert( pExpr!=0 ); while( (op = pExpr->op)==TK_UPLUS || op==TK_SPAN ) pExpr = pExpr->pLeft; -#if defined(SQLITE_ENABLE_STAT4) if( op==TK_REGISTER ) op = pExpr->op2; -#else - if( NEVER(op==TK_REGISTER) ) op = pExpr->op2; -#endif /* Compressed expressions only appear when parsing the DEFAULT clause ** on a table column definition, and hence only when pCtx==0. 
This @@ -78508,7 +79397,9 @@ static int valueFromExpr( assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 ); if( op==TK_CAST ){ - u8 aff = sqlite3AffinityType(pExpr->u.zToken,0); + u8 aff; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + aff = sqlite3AffinityType(pExpr->u.zToken,0); rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx); testcase( rc!=SQLITE_OK ); if( *ppVal ){ @@ -78581,6 +79472,7 @@ static int valueFromExpr( #ifndef SQLITE_OMIT_BLOB_LITERAL else if( op==TK_BLOB ){ int nVal; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); assert( pExpr->u.zToken[1]=='\'' ); pVal = valueNew(db, pCtx); @@ -78598,6 +79490,7 @@ static int valueFromExpr( } #endif else if( op==TK_TRUEFALSE ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pVal = valueNew(db, pCtx); if( pVal ){ pVal->flags = MEM_Int; @@ -78610,7 +79503,7 @@ static int valueFromExpr( no_mem: #ifdef SQLITE_ENABLE_STAT4 - if( pCtx==0 || pCtx->pParse->nErr==0 ) + if( pCtx==0 || NEVER(pCtx->pParse->nErr==0) ) #endif sqlite3OomFault(db); sqlite3DbFree(db, zVal); @@ -78635,7 +79528,7 @@ static int valueFromExpr( */ SQLITE_PRIVATE int sqlite3ValueFromExpr( sqlite3 *db, /* The database connection */ - Expr *pExpr, /* The expression to evaluate */ + const Expr *pExpr, /* The expression to evaluate */ u8 enc, /* Encoding to use */ u8 affinity, /* Affinity to use */ sqlite3_value **ppVal /* Write the new value here */ @@ -79151,8 +80044,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ if( p->nOpAlloc<=i ){ return growOp3(p, op, p1, p2, p3); } + assert( p->aOp!=0 ); p->nOp++; pOp = &p->aOp[i]; + assert( pOp!=0 ); pOp->opcode = (u8)op; pOp->p5 = 0; pOp->p1 = p1; @@ -80286,8 +81181,7 @@ SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){ */ static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ assert( p->nOp>0 || p->aOp==0 ); - assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed - || p->pParse->nErr>0 ); + assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->pParse->nErr>0 ); if( p->nOp ){ assert( p->aOp ); sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); @@ -80395,7 +81289,7 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayComment( if( zOpName[nOpName+1] ){ int seenCom = 0; char c; - zSynopsis = zOpName += nOpName + 1; + zSynopsis = zOpName + nOpName + 1; if( strncmp(zSynopsis,"IF ",3)==0 ){ sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3); zSynopsis = zAlt; @@ -80468,6 +81362,7 @@ static void displayP4Expr(StrAccum *p, Expr *pExpr){ const char *zOp = 0; switch( pExpr->op ){ case TK_STRING: + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3_str_appendf(p, "%Q", pExpr->u.zToken); break; case TK_INTEGER: @@ -80570,7 +81465,7 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3 *db, Op *pOp){ case P4_COLLSEQ: { static const char *const encnames[] = {"?", "8", "16LE", "16BE"}; CollSeq *pColl = pOp->p4.pColl; - assert( pColl->enc>=0 && pColl->enc<4 ); + assert( pColl->enc<4 ); sqlite3_str_appendf(&x, "%.18s-%s", pColl->zName, encnames[pColl->enc]); break; @@ -80814,8 +81709,8 @@ static void releaseMemArray(Mem *p, int N){ */ testcase( p->flags & MEM_Agg ); testcase( p->flags & MEM_Dyn ); - testcase( p->xDel==sqlite3VdbeFrameMemDel ); if( p->flags&(MEM_Agg|MEM_Dyn) ){ + testcase( (p->flags & MEM_Dyn)!=0 && p->xDel==sqlite3VdbeFrameMemDel ); sqlite3VdbeMemRelease(p); }else if( p->szMalloc ){ sqlite3DbFreeNN(db, p->zMalloc); @@ -81377,8 +82272,6 @@ SQLITE_PRIVATE 
void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } - assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE ); - assert( pCx->pBtx==0 || pCx->isEphemeral ); switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx); @@ -81915,9 +82808,9 @@ SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){ ** has made changes and is in autocommit mode, then commit those ** changes. If a rollback is needed, then do the rollback. ** -** This routine is the only way to move the state of a VM from -** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to -** call this on a VM that is in the SQLITE_MAGIC_HALT state. +** This routine is the only way to move the sqlite3eOpenState of a VM from +** SQLITE_STATE_RUN to SQLITE_STATE_HALT. It is harmless to +** call this on a VM that is in the SQLITE_STATE_HALT state. ** ** Return an error code. If the commit could not complete because of ** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it @@ -81963,9 +82856,15 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ sqlite3VdbeEnter(p); /* Check for one of the special errors */ - mrc = p->rc & 0xff; - isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR - || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL; + if( p->rc ){ + mrc = p->rc & 0xff; + isSpecialError = mrc==SQLITE_NOMEM + || mrc==SQLITE_IOERR + || mrc==SQLITE_INTERRUPT + || mrc==SQLITE_FULL; + }else{ + mrc = isSpecialError = 0; + } if( isSpecialError ){ /* If the query was read-only and the error code is SQLITE_INTERRUPT, ** no rollback is necessary. Otherwise, at least a savepoint @@ -82017,6 +82916,9 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ return SQLITE_ERROR; } rc = SQLITE_CONSTRAINT_FOREIGNKEY; + }else if( db->flags & SQLITE_CorruptRdOnly ){ + rc = SQLITE_CORRUPT; + db->flags &= ~SQLITE_CorruptRdOnly; }else{ /* The auto-commit flag is true, the vdbe program was successful ** or hit an 'OR FAIL' constraint and there are no deferred foreign @@ -82150,6 +83052,7 @@ SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){ sqlite3ValueSetNull(db->pErr); } db->errCode = rc; + db->errByteOffset = -1; return rc; } @@ -82411,7 +83314,7 @@ SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeFinishMoveto(VdbeCursor *p){ assert( p->deferredMoveto ); assert( p->isTable ); assert( p->eCurType==CURTYPE_BTREE ); - rc = sqlite3BtreeMovetoUnpacked(p->uc.pCursor, 0, p->movetoTarget, 0, &res); + rc = sqlite3BtreeTableMoveto(p->uc.pCursor, p->movetoTarget, 0, &res); if( rc ) return rc; if( res!=0 ) return SQLITE_CORRUPT_BKPT; #ifdef SQLITE_TEST @@ -82471,7 +83374,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){ if( p->deferredMoveto ){ u32 iMap; assert( !p->isEphemeral ); - if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 && !p->nullRow ){ + if( p->ub.aAltMap && (iMap = p->ub.aAltMap[1+*piCol])>0 && !p->nullRow ){ *pp = p->pAltCursor; *piCol = iMap - 1; return SQLITE_OK; @@ -82749,14 +83652,14 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){ /* ** Deserialize the data blob pointed to by buf as serial type serial_type -** and store the result in pMem. Return the number of bytes read. +** and store the result in pMem. ** ** This function is implemented as two separate routines for performance. ** The few cases that require local variables are broken out into a separate ** routine so that in most cases the overhead of moving the stack pointer ** is avoided. 
*/ -static u32 serialGet( +static void serialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ Mem *pMem /* Memory cell to write value into */ @@ -82790,9 +83693,8 @@ static u32 serialGet( memcpy(&pMem->u.r, &x, sizeof(x)); pMem->flags = IsNaN(x) ? MEM_Null : MEM_Real; } - return 8; } -SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( +SQLITE_PRIVATE void sqlite3VdbeSerialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ Mem *pMem /* Memory cell to write value into */ @@ -82803,13 +83705,13 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->flags = MEM_Null|MEM_Zero; pMem->n = 0; pMem->u.nZero = 0; - break; + return; } case 11: /* Reserved for future use */ case 0: { /* Null */ /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */ pMem->flags = MEM_Null; - break; + return; } case 1: { /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement @@ -82817,7 +83719,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->u.i = ONE_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); - return 1; + return; } case 2: { /* 2-byte signed integer */ /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit @@ -82825,7 +83727,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->u.i = TWO_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); - return 2; + return; } case 3: { /* 3-byte signed integer */ /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit @@ -82833,7 +83735,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->u.i = THREE_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); - return 3; + return; } case 4: { /* 4-byte signed integer */ /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit @@ -82845,7 +83747,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( #endif pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); - return 4; + return; } case 5: { /* 6-byte signed integer */ /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit @@ -82853,13 +83755,14 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); - return 6; + return; } case 6: /* 8-byte signed integer */ case 7: { /* IEEE floating point */ /* These use local variables, so do them in a separate routine ** to avoid having to move the frame pointer in the common case */ - return serialGet(buf,serial_type,pMem); + serialGet(buf,serial_type,pMem); + return; } case 8: /* Integer 0 */ case 9: { /* Integer 1 */ @@ -82867,7 +83770,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. 
*/ pMem->u.i = serial_type-8; pMem->flags = MEM_Int; - return 0; + return; } default: { /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in @@ -82878,10 +83781,10 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( pMem->z = (char *)buf; pMem->n = (serial_type-12)/2; pMem->flags = aFlag[serial_type&1]; - return pMem->n; + return; } } - return 0; + return; } /* ** This routine is used to allocate sufficient space for an UnpackedRecord @@ -82944,7 +83847,8 @@ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */ pMem->szMalloc = 0; pMem->z = 0; - d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); + sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); + d += sqlite3VdbeSerialTypeLen(serial_type); pMem++; if( (++u)>=p->nField ) break; } @@ -83028,7 +83932,8 @@ static int vdbeRecordCompareDebug( /* Extract the values to be compared. */ - d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); + sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); + d1 += sqlite3VdbeSerialTypeLen(serial_type1); /* Do the comparison */ @@ -83195,7 +84100,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem ** number. Return negative, zero, or positive if the first (i64) is less than, ** equal to, or greater than the second (double). */ -static int sqlite3IntFloatCompare(i64 i, double r){ +SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){ if( sizeof(LONGDOUBLE_TYPE)>8 ){ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i; testcase( x0x7fffffff ); assert( m.n>=0 ); if( unlikely(szHdr<3 || szHdr>(unsigned)m.n) ){ @@ -83919,7 +84824,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare( ** This routine sets the value to be returned by subsequent calls to ** sqlite3_changes() on the database handle 'db'. */ -SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){ +SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, i64 nChange){ assert( sqlite3_mutex_held(db->mutex) ); db->nChange = nChange; db->nTotalChange += nChange; @@ -84121,6 +85026,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( } } + assert( pCsr!=0 ); + assert( pCsr->eCurType==CURTYPE_BTREE ); assert( pCsr->nField==pTab->nCol || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1) ); @@ -84520,8 +85427,8 @@ SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){ ** the function result. ** ** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the -** result as a string or blob but if the string or blob is too large, it -** then sets the error code to SQLITE_TOOBIG +** result as a string or blob. Appropriate errors are set if the string/blob +** is too big or if an OOM occurs. ** ** The invokeValueDestructor(P,X) routine invokes destructor function X() ** on value P is not going to be used and need to be destroyed. @@ -84533,8 +85440,16 @@ static void setResultStrOrError( u8 enc, /* Encoding of z. 
0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ - if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){ - sqlite3_result_error_toobig(pCtx); + int rc = sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel); + if( rc ){ + if( rc==SQLITE_TOOBIG ){ + sqlite3_result_error_toobig(pCtx); + }else{ + /* The only errors possible from sqlite3VdbeMemSetStr are + ** SQLITE_TOOBIG and SQLITE_NOMEM */ + assert( rc==SQLITE_NOMEM ); + sqlite3_result_error_nomem(pCtx); + } } } static int invokeValueDestructor( @@ -84691,8 +85606,12 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){ return SQLITE_TOOBIG; } +#ifndef SQLITE_OMIT_INCRBLOB sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n); return SQLITE_OK; +#else + return sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n); +#endif } SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ pCtx->isError = errCode ? errCode : -1; @@ -84992,6 +85911,70 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){ return sqlite3_value_nochange(p->pOut); } +/* +** Implementation of sqlite3_vtab_in_first() (if bNext==0) and +** sqlite3_vtab_in_next() (if bNext!=0). +*/ +static int valueFromValueList( + sqlite3_value *pVal, /* Pointer to the ValueList object */ + sqlite3_value **ppOut, /* Store the next value from the list here */ + int bNext /* 1 for _next(). 0 for _first() */ +){ + int rc; + ValueList *pRhs; + + *ppOut = 0; + if( pVal==0 ) return SQLITE_MISUSE; + pRhs = (ValueList*)sqlite3_value_pointer(pVal, "ValueList"); + if( pRhs==0 ) return SQLITE_MISUSE; + if( bNext ){ + rc = sqlite3BtreeNext(pRhs->pCsr, 0); + }else{ + int dummy = 0; + rc = sqlite3BtreeFirst(pRhs->pCsr, &dummy); + assert( rc==SQLITE_OK || sqlite3BtreeEof(pRhs->pCsr) ); + if( sqlite3BtreeEof(pRhs->pCsr) ) rc = SQLITE_DONE; + } + if( rc==SQLITE_OK ){ + u32 sz; /* Size of current row in bytes */ + Mem sMem; /* Raw content of current row */ + memset(&sMem, 0, sizeof(sMem)); + sz = sqlite3BtreePayloadSize(pRhs->pCsr); + rc = sqlite3VdbeMemFromBtreeZeroOffset(pRhs->pCsr,(int)sz,&sMem); + if( rc==SQLITE_OK ){ + u8 *zBuf = (u8*)sMem.z; + u32 iSerial; + sqlite3_value *pOut = pRhs->pOut; + int iOff = 1 + getVarint32(&zBuf[1], iSerial); + sqlite3VdbeSerialGet(&zBuf[iOff], iSerial, pOut); + pOut->enc = ENC(pOut->db); + if( (pOut->flags & MEM_Ephem)!=0 && sqlite3VdbeMemMakeWriteable(pOut) ){ + rc = SQLITE_NOMEM; + }else{ + *ppOut = pOut; + } + } + sqlite3VdbeMemRelease(&sMem); + } + return rc; +} + +/* +** Set the iterator value pVal to point to the first value in the set. +** Set (*ppOut) to point to this value before returning. +*/ +SQLITE_API int sqlite3_vtab_in_first(sqlite3_value *pVal, sqlite3_value **ppOut){ + return valueFromValueList(pVal, ppOut, 0); +} + +/* +** Set the iterator value pVal to point to the next value in the set. +** Set (*ppOut) to point to this value before returning. +*/ +SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut){ + return valueFromValueList(pVal, ppOut, 1); +} + /* ** Return the current time for a statement. If the current time ** is requested more than once within the same run of a single prepared @@ -85676,7 +86659,10 @@ SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_valu break; } case SQLITE_FLOAT: { - rc = sqlite3_bind_double(pStmt, i, pValue->u.r); + assert( pValue->flags & (MEM_Real|MEM_IntReal) ); + rc = sqlite3_bind_double(pStmt, i, + (pValue->flags & MEM_Real) ? 
pValue->u.r : (double)pValue->u.i + ); break; } case SQLITE_BLOB: { @@ -85704,7 +86690,11 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ +#ifndef SQLITE_OMIT_INCRBLOB sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); +#else + rc = sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); +#endif sqlite3_mutex_leave(p->db->mutex); } return rc; @@ -85992,6 +86982,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa u32 nRec; u8 *aRec; + assert( p->pCsr->eCurType==CURTYPE_BTREE ); nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); aRec = sqlite3DbMallocRaw(db, nRec); if( !aRec ) goto preupdate_old_out; @@ -86299,11 +87290,9 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( #ifndef SQLITE_OMIT_UTF16 Mem utf8; /* Used to convert UTF16 into UTF8 for display */ #endif - char zBase[100]; /* Initial working space */ db = p->db; - sqlite3StrAccumInit(&out, 0, zBase, sizeof(zBase), - db->aLimit[SQLITE_LIMIT_LENGTH]); + sqlite3StrAccumInit(&out, 0, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); if( db->nVdbeExec>1 ){ while( *zRawSql ){ const char *zStart = zRawSql; @@ -86653,7 +87642,6 @@ static VdbeCursor *allocateCursor( Vdbe *p, /* The virtual machine */ int iCur, /* Index of the new VdbeCursor */ int nField, /* Number of fields in the table or index */ - int iDb, /* Database the cursor belongs to, or -1 */ u8 eCurType /* Type of the new cursor */ ){ /* Find the memory cell that will be used to store the blob of memory @@ -86710,7 +87698,6 @@ static VdbeCursor *allocateCursor( p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->zMalloc; memset(pCx, 0, offsetof(VdbeCursor,pAltCursor)); pCx->eCurType = eCurType; - pCx->iDb = iDb; pCx->nField = nField; pCx->aOffset = &pCx->aType[nField]; if( eCurType==CURTYPE_BTREE ){ @@ -87036,96 +88023,7 @@ SQLITE_PRIVATE void sqlite3VdbeRegisterDump(Vdbe *v){ ** hwtime.h contains inline assembler code for implementing ** high-performance timing routines. */ -/************** Include hwtime.h in the middle of vdbe.c *********************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. 
-*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in vdbe.c ***********************/ +/* #include "hwtime.h" */ #endif @@ -87172,6 +88070,42 @@ static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){ } } +/* +** Compute a bloom filter hash using pOp->p4.i registers from aMem[] beginning +** with pOp->p3. Return the hash. +*/ +static u64 filterHash(const Mem *aMem, const Op *pOp){ + int i, mx; + u64 h = 0; + + assert( pOp->p4type==P4_INT32 ); + for(i=pOp->p3, mx=i+pOp->p4.i; i<mx; i++){ + const Mem *p = &aMem[i]; + if( p->flags & (MEM_Int|MEM_IntReal) ){ + h += p->u.i; + }else if( p->flags & MEM_Real ){ + h += sqlite3VdbeIntValue(p); + }else if( p->flags & (MEM_Str|MEM_Blob) ){ + h += p->n; + if( p->flags & MEM_Zero ) h += p->u.nZero; + } + } + return h; +} + +/* +** Return the symbolic name for the data type of a pMem +*/ +static const char *vdbeMemTypeName(Mem *pMem){ + static const char *azTypes[] = { + /* SQLITE_INTEGER */ "INT", + /* SQLITE_FLOAT */ "REAL", + /* SQLITE_TEXT */ "TEXT", + /* SQLITE_BLOB */ "BLOB", + /* SQLITE_NULL */ "NULL" + }; + return azTypes[sqlite3_value_type(pMem)-1]; +} /* ** Execute as much of a VDBE program as we can. @@ -87454,6 +88388,8 @@ case OP_Gosub: { /* jump */ /* Most jump operations do a goto to this spot in order to update ** the pOp pointer. */ jump_to_p2: + assert( pOp->p2>0 ); /* There are never any jumps to instruction 0 */ + assert( pOp->p2<p->nOp ); /* Jumps must be in range */ pOp = &aOp[pOp->p2 - 1]; break; } @@ -87813,12 +88749,18 @@ case OP_SoftNull: { ** Synopsis: r[P2]=P4 (len=P1) ** ** P4 points to a blob of data P1 bytes long. Store this -** blob in register P2. +** blob in register P2. If P4 is a NULL pointer, then construct +** a zero-filled blob that is P1 bytes long in P2.
*/ case OP_Blob: { /* out2 */ assert( pOp->p1 <= SQLITE_MAX_LENGTH ); pOut = out2Prerelease(p, pOp); - sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); + if( pOp->p4.z==0 ){ + sqlite3VdbeMemSetZeroBlob(pOut, pOp->p1); + if( sqlite3VdbeMemExpandBlob(pOut) ) goto no_mem; + }else{ + sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); + } pOut->enc = encoding; UPDATE_MAX_BLOBSIZE(pOut); break; @@ -87967,24 +88909,22 @@ case OP_IntCopy: { /* out2 */ break; } -/* Opcode: ChngCntRow P1 P2 * * * -** Synopsis: output=r[P1] +/* Opcode: FkCheck * * * * * ** -** Output value in register P1 as the chance count for a DML statement, -** due to the "PRAGMA count_changes=ON" setting. Or, if there was a -** foreign key error in the statement, trigger the error now. +** Halt with an SQLITE_CONSTRAINT error if there are any unresolved +** foreign key constraint violations. If there are no foreign key +** constraint violations, this is a no-op. ** -** This opcode is a variant of OP_ResultRow that checks the foreign key -** immediate constraint count and throws an error if the count is -** non-zero. The P2 opcode must be 1. +** FK constraint violations are also checked when the prepared statement +** exits. This opcode is used to raise foreign key constraint errors prior +** to returning results such as a row change count or the result of a +** RETURNING clause. */ -case OP_ChngCntRow: { - assert( pOp->p2==1 ); +case OP_FkCheck: { if( (rc = sqlite3VdbeCheckFk(p,0))!=SQLITE_OK ){ goto abort_due_to_error; } - /* Fall through to the next case, OP_ResultRow */ - /* no break */ deliberate_fall_through + break; } /* Opcode: ResultRow P1 P2 * * * @@ -88622,7 +89562,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ sqlite3VdbeMemStringify(pIn1, encoding, 1); testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) ); flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); - if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str; + if( pIn1==pIn3 ) flags3 = flags1 | MEM_Str; } if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn3->flags & MEM_Int ); @@ -89003,6 +89943,22 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ break; } +/* Opcode: IsNullOrType P1 P2 P3 * * +** Synopsis: if typeof(r[P1]) IN (P3,5) goto P2 +** +** Jump to P2 if the value in register P1 is NULL or has a datatype P3. +** P3 is an integer which should be one of SQLITE_INTEGER, SQLITE_FLOAT, +** SQLITE_BLOB, SQLITE_NULL, or SQLITE_TEXT. 
+*/ +case OP_IsNullOrType: { /* jump, in1 */ + int doTheJump; + pIn1 = &aMem[pOp->p1]; + doTheJump = (pIn1->flags & MEM_Null)!=0 || sqlite3_value_type(pIn1)==pOp->p3; + VdbeBranchTaken( doTheJump, 2); + if( doTheJump ) goto jump_to_p2; + break; +} + /* Opcode: ZeroOrNull P1 P2 P3 * * ** Synopsis: r[P2] = 0 OR NULL ** @@ -89074,10 +90030,18 @@ case OP_Offset: { /* out3 */ assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; pOut = &p->aMem[pOp->p3]; - if( NEVER(pC==0) || pC->eCurType!=CURTYPE_BTREE ){ + if( pC==0 || pC->eCurType!=CURTYPE_BTREE ){ sqlite3VdbeMemSetNull(pOut); }else{ - sqlite3VdbeMemSetInt64(pOut, sqlite3BtreeOffset(pC->uc.pCursor)); + if( pC->deferredMoveto ){ + rc = sqlite3VdbeFinishMoveto(pC); + if( rc ) goto abort_due_to_error; + } + if( sqlite3BtreeEof(pC->uc.pCursor) ){ + sqlite3VdbeMemSetNull(pOut); + }else{ + sqlite3VdbeMemSetInt64(pOut, sqlite3BtreeOffset(pC->uc.pCursor)); + } } break; } @@ -89136,6 +90100,7 @@ case OP_Column: { assert( pC!=0 ); assert( p2<(u32)pC->nField ); aOffset = pC->aOffset; + assert( aOffset==pC->aType+pC->nField ); assert( pC->eCurType!=CURTYPE_VTAB ); assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow ); assert( pC->eCurType!=CURTYPE_SORTER ); @@ -89371,6 +90336,110 @@ case OP_Column: { } } +/* Opcode: TypeCheck P1 P2 P3 P4 * +** Synopsis: typecheck(r[P1@P2]) +** +** Apply affinities to the range of P2 registers beginning with P1. +** Take the affinities from the Table object in P4. If any value +** cannot be coerced into the correct type, then raise an error. +** +** This opcode is similar to OP_Affinity except that this opcode +** forces the register type to the Table column type. This is used +** to implement "strict affinity". +** +** GENERATED ALWAYS AS ... STATIC columns are only checked if P3 +** is zero. When P3 is non-zero, no type checking occurs for +** static generated columns. Virtual columns are computed at query time +** and so they are never checked. +** +** Preconditions: +** +**
+** <ul> +** <li> P2 should be the number of non-virtual columns in the +** table of P4. +** <li> Table P4 should be a STRICT table. +** </ul>
+** +** If any precondition is false, an assertion fault occurs. +*/ +case OP_TypeCheck: { + Table *pTab; + Column *aCol; + int i; + + assert( pOp->p4type==P4_TABLE ); + pTab = pOp->p4.pTab; + assert( pTab->tabFlags & TF_Strict ); + assert( pTab->nNVCol==pOp->p2 ); + aCol = pTab->aCol; + pIn1 = &aMem[pOp->p1]; + for(i=0; i<pTab->nCol; i++){ + if( aCol[i].colFlags & COLFLAG_GENERATED ){ + if( aCol[i].colFlags & COLFLAG_VIRTUAL ) continue; + if( pOp->p3 ){ pIn1++; continue; } + } + assert( pIn1 < &aMem[pOp->p1+pOp->p2] ); + applyAffinity(pIn1, aCol[i].affinity, encoding); + if( (pIn1->flags & MEM_Null)==0 ){ + switch( aCol[i].eCType ){ + case COLTYPE_BLOB: { + if( (pIn1->flags & MEM_Blob)==0 ) goto vdbe_type_error; + break; + } + case COLTYPE_INTEGER: + case COLTYPE_INT: { + if( (pIn1->flags & MEM_Int)==0 ) goto vdbe_type_error; + break; + } + case COLTYPE_TEXT: { + if( (pIn1->flags & MEM_Str)==0 ) goto vdbe_type_error; + break; + } + case COLTYPE_REAL: { + testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_Real ); + testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_IntReal ); + if( pIn1->flags & MEM_Int ){ + /* When applying REAL affinity, if the result is still an MEM_Int + ** that will fit in 6 bytes, then change the type to MEM_IntReal + ** so that we keep the high-resolution integer value but know that + ** the type really wants to be REAL. */ + testcase( pIn1->u.i==140737488355328LL ); + testcase( pIn1->u.i==140737488355327LL ); + testcase( pIn1->u.i==-140737488355328LL ); + testcase( pIn1->u.i==-140737488355329LL ); + if( pIn1->u.i<=140737488355327LL && pIn1->u.i>=-140737488355328LL){ + pIn1->flags |= MEM_IntReal; + pIn1->flags &= ~MEM_Int; + }else{ + pIn1->u.r = (double)pIn1->u.i; + pIn1->flags |= MEM_Real; + pIn1->flags &= ~MEM_Int; + } + }else if( (pIn1->flags & (MEM_Real|MEM_IntReal))==0 ){ + goto vdbe_type_error; + } + break; + } + default: { + /* COLTYPE_ANY. Accept anything.
*/ + break; + } + } + } + REGISTER_TRACE((int)(pIn1-aMem), pIn1); + pIn1++; + } + assert( pIn1 == &aMem[pOp->p1+pOp->p2] ); + break; + +vdbe_type_error: + sqlite3VdbeError(p, "cannot store %s value in %s column %s.%s", + vdbeMemTypeName(pIn1), sqlite3StdType[aCol[i].eCType-1], + pTab->zName, aCol[i].zCnName); + rc = SQLITE_CONSTRAINT_DATATYPE; + goto abort_due_to_error; +} + /* Opcode: Affinity P1 P2 * P4 * ** Synopsis: affinity(r[P1@P2]) ** @@ -89585,7 +90654,7 @@ case OP_MakeRecord: { testcase( uu==127 ); testcase( uu==128 ); testcase( uu==32767 ); testcase( uu==32768 ); testcase( uu==8388607 ); testcase( uu==8388608 ); - testcase( uu==2147483647 ); testcase( uu==2147483648 ); + testcase( uu==2147483647 ); testcase( uu==2147483648LL ); testcase( uu==140737488355327LL ); testcase( uu==140737488355328LL ); if( uu<=127 ){ if( (i&1)==i && file_format>=4 ){ @@ -89713,7 +90782,7 @@ case OP_MakeRecord: { break; } -/* Opcode: Count P1 P2 p3 * * +/* Opcode: Count P1 P2 P3 * * ** Synopsis: r[P2]=count() ** ** Store the number of entries (an integer value) in the table or index @@ -90034,8 +91103,16 @@ case OP_Transaction: { assert( pOp->p2>=0 && pOp->p2<=2 ); assert( pOp->p1>=0 && pOp->p1<db->nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); - if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){ - rc = SQLITE_READONLY; + assert( rc==SQLITE_OK ); + if( pOp->p2 && (db->flags & (SQLITE_QueryOnly|SQLITE_CorruptRdOnly))!=0 ){ + if( db->flags & SQLITE_QueryOnly ){ + /* Writes prohibited by the "PRAGMA query_only=TRUE" statement */ + rc = SQLITE_READONLY; + }else{ + /* Writes prohibited due to a prior SQLITE_CORRUPT in the current + ** transaction */ + rc = SQLITE_CORRUPT; + } goto abort_due_to_error; } pBt = db->aDb[pOp->p1].pBt; @@ -90077,7 +91154,8 @@ case OP_Transaction: { } } assert( pOp->p5==0 || pOp->p4type==P4_INT32 ); - if( pOp->p5 + if( rc==SQLITE_OK + && pOp->p5 && (iMeta!=pOp->p3 || db->aDb[pOp->p1].pSchema->iGeneration!=pOp->p4.i) ){ @@ -90174,6 +91252,7 @@ case OP_SetCookie: { /* When the schema cookie changes, record the new cookie internally */ pDb->pSchema->schema_cookie = pOp->p3 - pOp->p5; db->mDbFlags |= DBFLAG_SchemaChange; + sqlite3FkClearTriggerCache(db, pOp->p1); }else if( pOp->p2==BTREE_FILE_FORMAT ){ /* Record changes in the file format */ pDb->pSchema->file_format = pOp->p3; @@ -90287,6 +91366,8 @@ case OP_ReopenIdx: { pCur = p->apCsr[pOp->p1]; if( pCur && pCur->pgnoRoot==(u32)pOp->p2 ){ assert( pCur->iDb==pOp->p3 ); /* Guaranteed by the code generator */ + assert( pCur->eCurType==CURTYPE_BTREE ); + sqlite3BtreeClearCursor(pCur->uc.pCursor); goto open_cursor_set_hints; } /* If the cursor is not currently open or is open on a different @@ -90349,8 +91430,9 @@ case OP_OpenWrite: assert( pOp->p1>=0 ); assert( nField>=0 ); testcase( nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */ - pCur = allocateCursor(p, pOp->p1, nField, iDb, CURTYPE_BTREE); + pCur = allocateCursor(p, pOp->p1, nField, CURTYPE_BTREE); if( pCur==0 ) goto no_mem; + pCur->iDb = iDb; pCur->nullRow = 1; pCur->isOrdered = 1; pCur->pgnoRoot = p2; @@ -90392,7 +91474,7 @@ case OP_OpenDup: { assert( pOrig ); assert( pOrig->isEphemeral ); /* Only ephemeral cursors can be duplicated */ - pCx = allocateCursor(p, pOp->p1, pOrig->nField, -1, CURTYPE_BTREE); + pCx = allocateCursor(p, pOp->p1, pOrig->nField, CURTYPE_BTREE); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->isEphemeral = 1; pCx->pKeyInfo = pOrig->pKeyInfo; pCx->isTable = pOrig->isTable; pCx->pgnoRoot = pOrig->pgnoRoot; pCx->isOrdered =
pOrig->isOrdered; - pCx->pBtx = pOrig->pBtx; + pCx->ub.pBtx = pOrig->ub.pBtx; pCx->hasBeenDuped = 1; pOrig->hasBeenDuped = 1; - rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, + rc = sqlite3BtreeCursor(pCx->ub.pBtx, pCx->pgnoRoot, BTREE_WRCSR, pCx->pKeyInfo, pCx->uc.pCursor); /* The sqlite3BtreeCursor() routine can only fail for the first cursor ** opened for a database. Since there is already an open cursor when this @@ -90469,23 +91551,23 @@ case OP_OpenEphemeral: { aMem[pOp->p3].z = ""; } pCx = p->apCsr[pOp->p1]; - if( pCx && !pCx->hasBeenDuped ){ + if( pCx && !pCx->hasBeenDuped && ALWAYS(pOp->p2<=pCx->nField) ){ /* If the ephermeral table is already open and has no duplicates from ** OP_OpenDup, then erase all existing content so that the table is ** empty again, rather than creating a new table. */ assert( pCx->isEphemeral ); pCx->seqCount = 0; pCx->cacheStatus = CACHE_STALE; - rc = sqlite3BtreeClearTable(pCx->pBtx, pCx->pgnoRoot, 0); + rc = sqlite3BtreeClearTable(pCx->ub.pBtx, pCx->pgnoRoot, 0); }else{ - pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_BTREE); + pCx = allocateCursor(p, pOp->p1, pOp->p2, CURTYPE_BTREE); if( pCx==0 ) goto no_mem; pCx->isEphemeral = 1; - rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx, + rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->ub.pBtx, BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags); if( rc==SQLITE_OK ){ - rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1, 0); + rc = sqlite3BtreeBeginTrans(pCx->ub.pBtx, 1, 0); if( rc==SQLITE_OK ){ /* If a transient index is required, create it by calling ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before @@ -90494,26 +91576,26 @@ case OP_OpenEphemeral: { */ if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ assert( pOp->p4type==P4_KEYINFO ); - rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot, + rc = sqlite3BtreeCreateTable(pCx->ub.pBtx, &pCx->pgnoRoot, BTREE_BLOBKEY | pOp->p5); if( rc==SQLITE_OK ){ assert( pCx->pgnoRoot==SCHEMA_ROOT+1 ); assert( pKeyInfo->db==db ); assert( pKeyInfo->enc==ENC(db) ); - rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, + rc = sqlite3BtreeCursor(pCx->ub.pBtx, pCx->pgnoRoot, BTREE_WRCSR, pKeyInfo, pCx->uc.pCursor); } pCx->isTable = 0; }else{ pCx->pgnoRoot = SCHEMA_ROOT; - rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR, + rc = sqlite3BtreeCursor(pCx->ub.pBtx, SCHEMA_ROOT, BTREE_WRCSR, 0, pCx->uc.pCursor); pCx->isTable = 1; } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); if( rc ){ - sqlite3BtreeClose(pCx->pBtx); + sqlite3BtreeClose(pCx->ub.pBtx); } } } @@ -90537,7 +91619,7 @@ case OP_SorterOpen: { assert( pOp->p1>=0 ); assert( pOp->p2>=0 ); - pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_SORTER); + pCx = allocateCursor(p, pOp->p1, pOp->p2, CURTYPE_SORTER); if( pCx==0 ) goto no_mem; pCx->pKeyInfo = pOp->p4.pKeyInfo; assert( pCx->pKeyInfo->db==db ); @@ -90586,7 +91668,7 @@ case OP_OpenPseudo: { assert( pOp->p1>=0 ); assert( pOp->p3>=0 ); - pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, CURTYPE_PSEUDO); + pCx = allocateCursor(p, pOp->p1, pOp->p3, CURTYPE_PSEUDO); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->seekResult = pOp->p2; @@ -90774,6 +91856,7 @@ case OP_SeekGT: { /* jump, in3, group */ /* If the P3 value could not be converted into an integer without ** loss of information, then special processing is required... 
*/ if( (newType & (MEM_Int|MEM_IntReal))==0 ){ + int c; if( (newType & MEM_Real)==0 ){ if( (newType & MEM_Null) || oc>=OP_SeekGE ){ VdbeBranchTaken(1,2); @@ -90783,7 +91866,8 @@ case OP_SeekGT: { /* jump, in3, group */ if( rc!=SQLITE_OK ) goto abort_due_to_error; goto seek_not_found; } - }else + } + c = sqlite3IntFloatCompare(iKey, pIn3->u.r); /* If the approximation iKey is larger than the actual real search ** term, substitute >= for > and < for <=. e.g. if the search term @@ -90792,7 +91876,7 @@ case OP_SeekGT: { /* jump, in3, group */ ** (x > 4.9) -> (x >= 5) ** (x <= 4.9) -> (x < 5) */ - if( pIn3->u.r<(double)iKey ){ + if( c>0 ){ assert( OP_SeekGE==(OP_SeekGT-1) ); assert( OP_SeekLT==(OP_SeekLE-1) ); assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) ); @@ -90801,14 +91885,14 @@ case OP_SeekGT: { /* jump, in3, group */ /* If the approximation iKey is smaller than the actual real search ** term, substitute <= for < and > for >=. */ - else if( pIn3->u.r>(double)iKey ){ + else if( c<0 ){ assert( OP_SeekLE==(OP_SeekLT+1) ); assert( OP_SeekGT==(OP_SeekGE+1) ); assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) ); if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++; } } - rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)iKey, 0, &res); + rc = sqlite3BtreeTableMoveto(pC->uc.pCursor, (u64)iKey, 0, &res); pC->movetoTarget = iKey; /* Used by OP_Delete */ if( rc!=SQLITE_OK ){ goto abort_due_to_error; @@ -90855,7 +91939,7 @@ case OP_SeekGT: { /* jump, in3, group */ { int i; for(i=0; iuc.pCursor, &r, 0, 0, &res); + rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, &r, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } @@ -91274,7 +92358,7 @@ case OP_Found: { /* jump, in3 */ } } } - rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, pIdxKey, 0, 0, &res); + rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, pIdxKey, &res); if( pFree ) sqlite3DbFreeNN(db, pFree); if( rc!=SQLITE_OK ){ goto abort_due_to_error; @@ -91383,7 +92467,7 @@ case OP_NotExists: /* jump, in3 */ pCrsr = pC->uc.pCursor; assert( pCrsr!=0 ); res = 0; - rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res); + rc = sqlite3BtreeTableMoveto(pCrsr, iKey, 0, &res); assert( rc==SQLITE_OK || res==0 ); pC->movetoTarget = iKey; /* Used by OP_Delete */ pC->nullRow = 0; @@ -91540,7 +92624,7 @@ case OP_NewRowid: { /* out2 */ do{ sqlite3_randomness(sizeof(v), &v); v &= (MAX_ROWID>>1); v++; /* Ensure that v is greater than zero */ - }while( ((rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)v, + }while( ((rc = sqlite3BtreeTableMoveto(pC->uc.pCursor, (u64)v, 0, &res))==SQLITE_OK) && (res==0) && (++cnt<100)); @@ -91630,7 +92714,7 @@ case OP_Insert: { assert( (pOp->p5 & OPFLAG_ISNOOP) || HasRowid(pTab) ); }else{ pTab = 0; - zDb = 0; /* Not needed. Silence a compiler warning. */ + zDb = 0; } #ifdef SQLITE_ENABLE_PREUPDATE_HOOK @@ -91783,13 +92867,14 @@ case OP_Delete: { pC->movetoTarget = sqlite3BtreeIntegerKey(pC->uc.pCursor); } }else{ - zDb = 0; /* Not needed. Silence a compiler warning. */ - pTab = 0; /* Not needed. Silence a compiler warning. */ + zDb = 0; + pTab = 0; } #ifdef SQLITE_ENABLE_PREUPDATE_HOOK /* Invoke the pre-update-hook if required. */ - if( db->xPreUpdateCallback && pOp->p4.pTab ){ + assert( db->xPreUpdateCallback==0 || pTab==pOp->p4.pTab ); + if( db->xPreUpdateCallback && pTab ){ assert( !(opflags & OPFLAG_ISUPDATE) || HasRowid(pTab)==0 || (aMem[pOp->p3].flags & MEM_Int) @@ -91830,7 +92915,7 @@ case OP_Delete: { /* Invoke the update-hook if required. 
*/ if( opflags & OPFLAG_NCHANGE ){ p->nChange++; - if( db->xUpdateCallback && HasRowid(pTab) ){ + if( db->xUpdateCallback && ALWAYS(pTab!=0) && HasRowid(pTab) ){ db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, pTab->zName, pC->movetoTarget); assert( pC->iDb>=0 ); @@ -92035,6 +93120,10 @@ case OP_Rowid: { /* out2 */ ** Move the cursor P1 to a null row. Any OP_Column operations ** that occur while the cursor is on the null row will always ** write a NULL. +** +** Or, if P1 is a Pseudo-Cursor (a cursor opened using OP_OpenPseudo) +** just reset the cache for that cursor. This causes the row of +** content held by the pseudo-cursor to be reparsed. */ case OP_NullRow: { VdbeCursor *pC; @@ -92417,7 +93506,8 @@ case OP_SorterInsert: { /* in2 */ ** an UPDATE or DELETE statement and the index entry to be updated ** or deleted is not found. For some uses of IdxDelete ** (example: the EXCEPT operator) it does not matter that no matching -** entry is found. For those cases, P5 is zero. +** entry is found. For those cases, P5 is zero. Also, do not raise +** this (self-correcting and non-critical) error if in writable_schema mode. */ case OP_IdxDelete: { VdbeCursor *pC; @@ -92438,12 +93528,12 @@ case OP_IdxDelete: { r.nField = (u16)pOp->p3; r.default_rc = 0; r.aMem = &aMem[pOp->p2]; - rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &res); + rc = sqlite3BtreeIndexMoveto(pCrsr, &r, &res); if( rc ) goto abort_due_to_error; if( res==0 ){ rc = sqlite3BtreeDelete(pCrsr, BTREE_AUXDELETE); if( rc ) goto abort_due_to_error; - }else if( pOp->p5 ){ + }else if( pOp->p5 && !sqlite3WritableSchema(db) ){ rc = sqlite3ReportError(SQLITE_CORRUPT_INDEX, __LINE__, "index corruption"); goto abort_due_to_error; } @@ -92522,9 +93612,9 @@ case OP_IdxRowid: { /* out2 */ pTabCur->movetoTarget = rowid; pTabCur->deferredMoveto = 1; assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 ); - pTabCur->aAltMap = pOp->p4.ai; - assert( !pC->isEphemeral ); assert( !pTabCur->isEphemeral ); + pTabCur->ub.aAltMap = pOp->p4.ai; + assert( !pC->isEphemeral ); pTabCur->pAltCursor = pC; }else{ pOut = out2Prerelease(p, pOp); @@ -92751,7 +93841,7 @@ case OP_Destroy: { /* out2 */ ** See also: Destroy */ case OP_Clear: { - int nChange; + i64 nChange; sqlite3VdbeIncrWriteCounter(p, 0); nChange = 0; @@ -92877,7 +93967,7 @@ case OP_ParseSchema: { }else #endif { - zSchema = DFLT_SCHEMA_TABLE; + zSchema = LEGACY_SCHEMA_TABLE; initData.db = db; initData.iDb = iDb; initData.pzErrMsg = &p->zErrMsg; @@ -94046,7 +95136,7 @@ case OP_VOpen: { pVCur->pVtab = pVtab; /* Initialize vdbe cursor object */ - pCur = allocateCursor(p, pOp->p1, 0, -1, CURTYPE_VTAB); + pCur = allocateCursor(p, pOp->p1, 0, CURTYPE_VTAB); if( pCur ){ pCur->uc.pVCur = pVCur; pVtab->nRef++; @@ -94059,6 +95149,34 @@ case OP_VOpen: { } #endif /* SQLITE_OMIT_VIRTUALTABLE */ +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VInitIn P1 P2 P3 * * +** Synopsis: r[P2]=ValueList(P1,P3) +** +** Set register P2 to be a pointer to a ValueList object for cursor P1 +** with cache register P3 and output register P3+1. This ValueList object +** can be used as the first argument to sqlite3_vtab_in_first() and +** sqlite3_vtab_in_next() to extract all of the values stored in the P1 +** cursor. Register P3 is used to hold the values returned by +** sqlite3_vtab_in_first() and sqlite3_vtab_in_next(). 
+*/ +case OP_VInitIn: { /* out2 */ + VdbeCursor *pC; /* The cursor containing the RHS values */ + ValueList *pRhs; /* New ValueList object to put in reg[P2] */ + + pC = p->apCsr[pOp->p1]; + pRhs = sqlite3_malloc64( sizeof(*pRhs) ); + if( pRhs==0 ) goto no_mem; + pRhs->pCsr = pC->uc.pCursor; + pRhs->pOut = &aMem[pOp->p3]; + pOut = out2Prerelease(p, pOp); + pOut->flags = MEM_Null; + sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3_free); + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + + #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VFilter P1 P2 P3 P4 * ** Synopsis: iplan=r[P3] zplan='P4' @@ -94097,6 +95215,7 @@ case OP_VFilter: { /* jump */ pCur = p->apCsr[pOp->p1]; assert( memIsValid(pQuery) ); REGISTER_TRACE(pOp->p3, pQuery); + assert( pCur!=0 ); assert( pCur->eCurType==CURTYPE_VTAB ); pVCur = pCur->uc.pVCur; pVtab = pVCur->pVtab; @@ -94108,7 +95227,6 @@ case OP_VFilter: { /* jump */ iQuery = (int)pQuery->u.i; /* Invoke the xFilter method */ - res = 0; apArg = p->apArg; for(i = 0; iapCsr[pOp->p1]; + assert( pCur!=0 ); assert( pCur->eCurType==CURTYPE_VTAB ); assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); pDest = &aMem[pOp->p3]; @@ -94198,8 +95317,8 @@ case OP_VNext: { /* jump */ int res; VdbeCursor *pCur; - res = 0; pCur = p->apCsr[pOp->p1]; + assert( pCur!=0 ); assert( pCur->eCurType==CURTYPE_VTAB ); if( pCur->nullRow ){ break; } @@ -94295,7 +95414,7 @@ case OP_VUpdate: { const sqlite3_module *pModule; int nArg; int i; - sqlite_int64 rowid; + sqlite_int64 rowid = 0; Mem **apArg; Mem *pX; @@ -94483,6 +95602,77 @@ case OP_Function: { /* group */ break; } +/* Opcode: FilterAdd P1 * P3 P4 * +** Synopsis: filter(P1) += key(P3@P4) +** +** Compute a hash on the P4 registers starting with r[P3] and +** add that hash to the bloom filter contained in r[P1]. +*/ +case OP_FilterAdd: { + u64 h; + + assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); + pIn1 = &aMem[pOp->p1]; + assert( pIn1->flags & MEM_Blob ); + assert( pIn1->n>0 ); + h = filterHash(aMem, pOp); +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + int ii; + for(ii=pOp->p3; ii<pOp->p3+pOp->p4.i; ii++){ + registerTrace(ii, &aMem[ii]); + } + printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); + } +#endif + h %= pIn1->n; + pIn1->z[h/8] |= 1<<(h&7); + break; +} + +/* Opcode: Filter P1 P2 P3 P4 * +** Synopsis: if key(P3@P4) not in filter(P1) goto P2 +** +** Compute a hash on the key contained in the P4 registers starting +** with r[P3]. Check to see if that hash is found in the +** bloom filter hosted by register P1. If it is not present then +** maybe jump to P2. Otherwise fall through. +** +** False negatives are harmless. It is always safe to fall through, +** even if the value is in the bloom filter. A false negative causes +** more CPU cycles to be used, but it should still yield the correct +** answer. However, an incorrect answer may well arise from a +** false positive - if the jump is taken when it should fall through.
+*/ +case OP_Filter: { /* jump */ + u64 h; + + assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); + pIn1 = &aMem[pOp->p1]; + assert( (pIn1->flags & MEM_Blob)!=0 ); + assert( pIn1->n >= 1 ); + h = filterHash(aMem, pOp); +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + int ii; + for(ii=pOp->p3; ii<pOp->p3+pOp->p4.i; ii++){ + registerTrace(ii, &aMem[ii]); + } + printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); + } +#endif + h %= pIn1->n; + if( (pIn1->z[h/8] & (1<<(h&7)))==0 ){ + VdbeBranchTaken(1, 2); + p->aCounter[SQLITE_STMTSTATUS_FILTER_HIT]++; + goto jump_to_p2; + }else{ + p->aCounter[SQLITE_STMTSTATUS_FILTER_MISS]++; + VdbeBranchTaken(0, 2); + } + break; +} + /* Opcode: Trace P1 P2 * P4 * ** ** Write P4 on the statement trace output if statement tracing is @@ -94740,6 +95930,18 @@ default: { /* This is really OP_Noop, OP_Explain */ rc = SQLITE_CORRUPT_BKPT; } assert( rc ); +#ifdef SQLITE_DEBUG + if( db->flags & SQLITE_VdbeTrace ){ + const char *zTrace = p->zSql; + if( zTrace==0 ){ + if( aOp[0].opcode==OP_Trace ){ + zTrace = aOp[0].p4.z; + } + if( zTrace==0 ) zTrace = "???"; + } + printf("ABORT-due-to-error (rc=%d): %s\n", rc, zTrace); + } +#endif if( p->zErrMsg==0 && rc!=SQLITE_IOERR_NOMEM ){ sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); } @@ -94750,6 +95952,9 @@ default: { /* This is really OP_Noop, OP_Explain */ (int)(pOp - aOp), p->zSql, p->zErrMsg); sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); + if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){ + db->flags |= SQLITE_CorruptRdOnly; + } rc = SQLITE_ERROR; if( resetSchemaOnFault>0 ){ sqlite3ResetOneSchema(db, resetSchemaOnFault-1); @@ -94881,7 +96086,10 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ } if( rc==SQLITE_ROW ){ VdbeCursor *pC = v->apCsr[0]; - u32 type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0; + u32 type; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0; testcase( pC->nHdrParsed==p->iCol ); testcase( pC->nHdrParsed==p->iCol+1 ); if( type<12 ){ @@ -94955,10 +96163,9 @@ SQLITE_API int sqlite3_blob_open( sqlite3_mutex_enter(db->mutex); pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); - do { - memset(&sParse, 0, sizeof(Parse)); + while(1){ + sqlite3ParseObjectInit(&sParse,db); if( !pBlob ) goto blob_open_out; - sParse.db = db; sqlite3DbFree(db, zErr); zErr = 0; @@ -94973,7 +96180,7 @@ SQLITE_API int sqlite3_blob_open( sqlite3ErrorMsg(&sParse, "cannot open table without rowid: %s", zTable); } #ifndef SQLITE_OMIT_VIEW - if( pTab && pTab->pSelect ){ + if( pTab && IsView(pTab) ){ pTab = 0; sqlite3ErrorMsg(&sParse, "cannot open view: %s", zTable); } @@ -94993,7 +96200,7 @@ SQLITE_API int sqlite3_blob_open( /* Now search pTab for the exact column. */ for(iCol=0; iCol<pTab->nCol; iCol++) { - if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){ + if( sqlite3StrICmp(pTab->aCol[iCol].zCnName, zColumn)==0 ){ break; } } @@ -95018,7 +96225,8 @@ SQLITE_API int sqlite3_blob_open( ** key columns must be indexed. The check below will pick up this ** case.
*/ FKey *pFKey; - for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ + assert( IsOrdinaryTable(pTab) ); + for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pFKey->pNextFrom){ int j; for(j=0; j<pFKey->nCol; j++){ if( pFKey->aCol[j].iFrom==iCol ){ @@ -95134,7 +96342,9 @@ SQLITE_API int sqlite3_blob_open( goto blob_open_out; } rc = blobSeekToRow(pBlob, iRow, &zErr); - } while( (++nAttempt)<SQLITE_MAX_SCHEMA_RETRY && rc==SQLITE_SCHEMA ); + if( (++nAttempt)>=SQLITE_MAX_SCHEMA_RETRY || rc!=SQLITE_SCHEMA ) break; + sqlite3ParseObjectReset(&sParse); + } blob_open_out: if( rc==SQLITE_OK && db->mallocFailed==0 ){ @@ -95145,7 +96355,7 @@ SQLITE_API int sqlite3_blob_open( } sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); sqlite3DbFree(db, zErr); - sqlite3ParserReset(&sParse); + sqlite3ParseObjectReset(&sParse); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; @@ -95225,6 +96435,8 @@ static int blobReadWrite( */ sqlite3_int64 iKey; iKey = sqlite3BtreeIntegerKey(p->pCsr); + assert( v->apCsr[0]!=0 ); + assert( v->apCsr[0]->eCurType==CURTYPE_BTREE ); sqlite3VdbePreUpdateHook( v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1, p->iCol ); @@ -96278,7 +97490,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( } #endif - assert( pCsr->pKeyInfo && pCsr->pBtx==0 ); + assert( pCsr->pKeyInfo ); + assert( !pCsr->isEphemeral ); assert( pCsr->eCurType==CURTYPE_SORTER ); szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nKeyField-1)*sizeof(CollSeq*); sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); @@ -96607,7 +97820,7 @@ static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){ sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize); sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte); sqlite3OsFetch(pFd, 0, (int)nByte, &p); - sqlite3OsUnfetch(pFd, 0, p); + if( p ) sqlite3OsUnfetch(pFd, 0, p); } } #else @@ -97325,6 +98538,7 @@ static int vdbeIncrMergerNew( vdbeMergeEngineFree(pMerger); rc = SQLITE_NOMEM_BKPT; } + assert( *ppOut!=0 || rc!=SQLITE_OK ); return rc; } @@ -98690,6 +99904,9 @@ static int memjrnlCreateFile(MemJournal *p){ } +/* Forward reference */ +static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size); + /* ** Write data to the file. */ @@ -98720,22 +99937,20 @@ static int memjrnlWrite( ** the in-memory journal is being used by a connection using the ** atomic-write optimization. In this case the first 28 bytes of the ** journal file may be written as part of committing the transaction. */ - assert( iOfst==p->endpoint.iOffset || iOfst==0 ); -#if defined(SQLITE_ENABLE_ATOMIC_WRITE) \ - || defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE) + assert( iOfst<=p->endpoint.iOffset ); + if( iOfst>0 && iOfst!=p->endpoint.iOffset ){ + memjrnlTruncate(pJfd, iOfst); + } if( iOfst==0 && p->pFirst ){ assert( p->nChunkSize>iAmt ); memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt); -}else -#else - assert( iOfst>0 || p->pFirst==0 ); -#endif - { while( nWrite>0 ){ FileChunk *pChunk = p->endpoint.pChunk; int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize); int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset); + assert( pChunk!=0 || iChunkOffset==0 ); if( iChunkOffset==0 ){ /* New chunk is required to extend the file.
*/ FileChunk *pNew = sqlite3_malloc(fileChunkSize(p->nChunkSize)); @@ -98750,10 +99965,11 @@ static int memjrnlWrite( assert( !p->pFirst ); p->pFirst = pNew; } - p->endpoint.pChunk = pNew; + pChunk = p->endpoint.pChunk = pNew; } - memcpy((u8*)p->endpoint.pChunk->zChunk + iChunkOffset, zWrite, iSpace); + assert( pChunk!=0 ); + memcpy((u8*)pChunk->zChunk + iChunkOffset, zWrite, iSpace); zWrite += iSpace; nWrite -= iSpace; p->endpoint.iOffset += iSpace; @@ -98777,7 +99993,7 @@ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ p->pFirst = 0; }else{ i64 iOff = p->nChunkSize; - for(pIter=p->pFirst; ALWAYS(pIter) && iOff<=size; pIter=pIter->pNext){ + for(pIter=p->pFirst; ALWAYS(pIter) && iOffpNext){ iOff += p->nChunkSize; } if( ALWAYS(pIter) ){ @@ -99026,7 +100242,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ assert( !ExprHasProperty(pExpr, EP_WinFunc) ); pExpr = pExpr->pRight; continue; - }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + }else if( ExprUseXSelect(pExpr) ){ assert( !ExprHasProperty(pExpr, EP_WinFunc) ); if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort; }else{ @@ -99298,6 +100514,7 @@ static void resolveAlias( }else{ incrAggFunctionDepth(pDup, nSubquery); if( pExpr->op==TK_COLLATE ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); } @@ -99401,6 +100618,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){ Table *pExTab; n = pExpr->iColumn; + assert( ExprUseYTab(pExpr) ); pExTab = pExpr->y.pTab; assert( pExTab!=0 ); if( (pExTab->tabFlags & TF_HasGenerated)!=0 @@ -99514,7 +100732,7 @@ static int lookupName( u8 hCol; pTab = pItem->pTab; assert( pTab!=0 && pTab->zName!=0 ); - assert( pTab->nCol>0 ); + assert( pTab->nCol>0 || pParse->nErr ); if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){ int hit = 0; pEList = pItem->pSelect->pEList; @@ -99529,8 +100747,9 @@ static int lookupName( } if( hit || zTab==0 ) continue; } - if( zDb && pTab->pSchema!=pSchema ){ - continue; + if( zDb ){ + if( pTab->pSchema!=pSchema ) continue; + if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue; } if( zTab ){ const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName; @@ -99538,16 +100757,16 @@ static int lookupName( if( sqlite3StrICmp(zTabName, zTab)!=0 ){ continue; } + assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){ sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab); } } - if( 0==(cntTab++) ){ - pMatch = pItem; - } hCol = sqlite3StrIHash(zCol); for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ - if( pCol->hName==hCol && sqlite3StrICmp(pCol->zName, zCol)==0 ){ + if( pCol->hName==hCol + && sqlite3StrICmp(pCol->zCnName, zCol)==0 + ){ /* If there has been exactly one prior match and this match ** is for the right-hand table of a NATURAL JOIN or is in a ** USING clause, then skip this match. 
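The OP_Filter hunk earlier in this patch probes the blob in register P1 as a bitmap: the key hash is reduced modulo the bitmap size and a single bit is tested, so a clear bit proves the key was never inserted. The following is a minimal illustrative sketch of that bit-test only, using hypothetical names; it is not SQLite's filterHash()/Mem machinery and is not part of the patch.

#include <stdint.h>

/* Return non-zero if the bit selected by hash h is set in a bitmap of
** nBits bits stored in aMap (caller guarantees nBits <= 8*bytes in aMap).
** A zero result means "definitely not present"; a non-zero result only
** means "possibly present". */
static int bitmapMightContain(const unsigned char *aMap, uint64_t nBits, uint64_t h){
  h %= nBits;                              /* fold the hash onto the bitmap */
  return (aMap[h/8] & (1u<<(h&7)))!=0;     /* test one bit of one byte */
}

A set bit is only advisory, which is why the opcode above bumps the FILTER_HIT/FILTER_MISS statement counters instead of deciding membership outright.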
@@ -99563,9 +100782,14 @@ static int lookupName( break; } } + if( 0==cnt && VisibleRowid(pTab) ){ + cntTab++; + pMatch = pItem; + } } if( pMatch ){ pExpr->iTable = pMatch->iCursor; + assert( ExprUseYTab(pExpr) ); pExpr->y.pTab = pMatch->pTab; /* RIGHT JOIN not (yet) supported */ assert( (pMatch->fg.jointype & JT_RIGHT)==0 ); @@ -99620,7 +100844,9 @@ static int lookupName( pSchema = pTab->pSchema; cntTab++; for(iCol=0, pCol=pTab->aCol; iColnCol; iCol++, pCol++){ - if( pCol->hName==hCol && sqlite3StrICmp(pCol->zName, zCol)==0 ){ + if( pCol->hName==hCol + && sqlite3StrICmp(pCol->zCnName, zCol)==0 + ){ if( iCol==pTab->iPKey ){ iCol = -1; } @@ -99637,6 +100863,7 @@ static int lookupName( #ifndef SQLITE_OMIT_UPSERT if( pExpr->iTable==EXCLUDED_TABLE_NUMBER ){ testcase( iCol==(-1) ); + assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT ){ pExpr->iColumn = iCol; pExpr->y.pTab = pTab; @@ -99649,9 +100876,11 @@ static int lookupName( }else #endif /* SQLITE_OMIT_UPSERT */ { + assert( ExprUseYTab(pExpr) ); pExpr->y.pTab = pTab; if( pParse->bReturning ){ eNewExprOp = TK_REGISTER; + pExpr->op2 = TK_COLUMN; pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable + sqlite3TableColumnToStorage(pTab, iCol) + 1; }else{ @@ -99685,7 +100914,7 @@ static int lookupName( && pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) - && VisibleRowid(pMatch->pTab) + && ALWAYS(VisibleRowid(pMatch->pTab)) ){ cnt = 1; pExpr->iColumn = -1; @@ -99723,8 +100952,8 @@ static int lookupName( ){ Expr *pOrig; assert( pExpr->pLeft==0 && pExpr->pRight==0 ); - assert( pExpr->x.pList==0 ); - assert( pExpr->x.pSelect==0 ); + assert( ExprUseXList(pExpr)==0 || pExpr->x.pList==0 ); + assert( ExprUseXSelect(pExpr)==0 || pExpr->x.pSelect==0 ); pOrig = pEList->a[j].pExpr; if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){ sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); @@ -99796,7 +101025,7 @@ static int lookupName( sqlite3VdbeAddDblquoteStr(db, pParse->pVdbe, zCol); #endif pExpr->op = TK_STRING; - pExpr->y.pTab = 0; + memset(&pExpr->y, 0, sizeof(pExpr->y)); return WRC_Prune; } if( sqlite3ExprIdToTrueFalse(pExpr) ){ @@ -99818,6 +101047,7 @@ static int lookupName( }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } + sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); pParse->checkSchema = 1; pTopNC->nNcErr++; } @@ -99882,7 +101112,9 @@ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSr Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); if( p ){ SrcItem *pItem = &pSrc->a[iSrc]; - Table *pTab = p->y.pTab = pItem->pTab; + Table *pTab; + assert( ExprUseYTab(p) ); + pTab = p->y.pTab = pItem->pTab; p->iTable = pItem->iCursor; if( p->y.pTab->iPKey==iCol ){ p->iColumn = -1; @@ -99924,7 +101156,8 @@ static void notValidImpl( Parse *pParse, /* Leave error message here */ NameContext *pNC, /* The name context */ const char *zMsg, /* Type of error */ - Expr *pExpr /* Invalidate this expression on error */ + Expr *pExpr, /* Invalidate this expression on error */ + Expr *pError /* Associate error with this expression */ ){ const char *zIn = "partial index WHERE clauses"; if( pNC->ncFlags & NC_IdxExpr ) zIn = "index expressions"; @@ -99936,10 +101169,11 @@ static void notValidImpl( #endif sqlite3ErrorMsg(pParse, "%s prohibited in %s", zMsg, zIn); if( pExpr ) pExpr->op = TK_NULL; + sqlite3RecordErrorOffsetOfExpr(pParse->db, pError); } -#define sqlite3ResolveNotValid(P,N,M,X,E) \ +#define sqlite3ResolveNotValid(P,N,M,X,E,R) \ assert( 
((X)&~(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol))==0 ); \ - if( ((N)->ncFlags & (X))!=0 ) notValidImpl(P,N,M,E); + if( ((N)->ncFlags & (X))!=0 ) notValidImpl(P,N,M,E,R); /* ** Expression p should encode a floating point value between 1.0 and 0.0. @@ -99949,6 +101183,7 @@ static void notValidImpl( static int exprProbability(Expr *p){ double r = -1.0; if( p->op!=TK_FLOAT ) return -1; + assert( !ExprHasProperty(p, EP_IntValue) ); sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8); assert( r>=0.0 ); if( r>1.0 ) return -1; @@ -99997,6 +101232,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pSrcList && pSrcList->nSrc>=1 ); pItem = pSrcList->a; pExpr->op = TK_COLUMN; + assert( ExprUseYTab(pExpr) ); pExpr->y.pTab = pItem->pTab; pExpr->iTable = pItem->iCursor; pExpr->iColumn--; @@ -100028,6 +101264,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } sqlite3WalkExpr(pWalker, pExpr->pLeft); if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){ + testcase( ExprHasProperty(pExpr, EP_FromJoin) ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); if( pExpr->op==TK_NOTNULL ){ pExpr->u.zToken = "true"; ExprSetProperty(pExpr, EP_IsTrue); @@ -100063,24 +101301,28 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_ID ){ zDb = 0; zTable = 0; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); zColumn = pExpr->u.zToken; }else{ Expr *pLeft = pExpr->pLeft; testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); sqlite3ResolveNotValid(pParse, pNC, "the \".\" operator", - NC_IdxExpr|NC_GenCol, 0); + NC_IdxExpr|NC_GenCol, 0, pExpr); pRight = pExpr->pRight; if( pRight->op==TK_ID ){ zDb = 0; }else{ assert( pRight->op==TK_DOT ); + assert( !ExprHasProperty(pRight, EP_IntValue) ); zDb = pLeft->u.zToken; pLeft = pRight->pLeft; pRight = pRight->pRight; } + assert( ExprUseUToken(pLeft) && ExprUseUToken(pRight) ); zTable = pLeft->u.zToken; zColumn = pRight->u.zToken; + assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight); sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft); @@ -100097,7 +101339,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ int no_such_func = 0; /* True if no such function exists */ int wrong_num_args = 0; /* True if wrong number of arguments */ int is_agg = 0; /* True if is an aggregate function */ - int nId; /* Number of characters in function name */ const char *zId; /* The function name. */ FuncDef *pDef; /* Information about the function */ u8 enc = ENC(pParse->db); /* The database encoding */ @@ -100105,9 +101346,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #ifndef SQLITE_OMIT_WINDOWFUNC Window *pWin = (IsWindowFunc(pExpr) ? 
pExpr->y.pWin : 0); #endif - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) ); zId = pExpr->u.zToken; - nId = sqlite3Strlen30(zId); pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); if( pDef==0 ){ pDef = sqlite3FindFunction(pParse->db, zId, -2, enc, 0); @@ -100124,8 +101364,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ pExpr->iTable = exprProbability(pList->a[1].pExpr); if( pExpr->iTable<0 ){ sqlite3ErrorMsg(pParse, - "second argument to likelihood() must be a " - "constant between 0.0 and 1.0"); + "second argument to %#T() must be a " + "constant between 0.0 and 1.0", pExpr); pNC->nNcErr++; } }else{ @@ -100146,8 +101386,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ int auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0,pDef->zName,0); if( auth!=SQLITE_OK ){ if( auth==SQLITE_DENY ){ - sqlite3ErrorMsg(pParse, "not authorized to use function: %s", - pDef->zName); + sqlite3ErrorMsg(pParse, "not authorized to use function: %#T", + pExpr); pNC->nNcErr++; } pExpr->op = TK_NULL; @@ -100170,7 +101410,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ** in a CHECK constraint. SQLServer, MySQL, and PostgreSQL all ** all this. */ sqlite3ResolveNotValid(pParse, pNC, "non-deterministic functions", - NC_IdxExpr|NC_PartIdx|NC_GenCol, 0); + NC_IdxExpr|NC_PartIdx|NC_GenCol, 0, pExpr); }else{ assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */ pExpr->op2 = pNC->ncFlags & NC_SelfRef; @@ -100183,7 +101423,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ /* Internal-use-only functions are disallowed unless the ** SQL is being compiled using sqlite3NestedParse() or ** the SQLITE_TESTCTRL_INTERNAL_FUNCTIONS test-control has be - ** used to activate internal functionsn for testing purposes */ + ** used to activate internal functions for testing purposes */ no_such_func = 1; pDef = 0; }else @@ -100202,7 +101442,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ); if( pDef && pDef->xValue==0 && pWin ){ sqlite3ErrorMsg(pParse, - "%.*s() may not be used as a window function", nId, zId + "%#T() may not be used as a window function", pExpr ); pNC->nNcErr++; }else if( @@ -100216,13 +101456,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ }else{ zType = "aggregate"; } - sqlite3ErrorMsg(pParse, "misuse of %s function %.*s()",zType,nId,zId); + sqlite3ErrorMsg(pParse, "misuse of %s function %#T()",zType,pExpr); pNC->nNcErr++; is_agg = 0; } #else if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0) ){ - sqlite3ErrorMsg(pParse,"misuse of aggregate function %.*s()",nId,zId); + sqlite3ErrorMsg(pParse,"misuse of aggregate function %#T()",pExpr); pNC->nNcErr++; is_agg = 0; } @@ -100232,18 +101472,18 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ && pParse->explain==0 #endif ){ - sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); + sqlite3ErrorMsg(pParse, "no such function: %#T", pExpr); pNC->nNcErr++; }else if( wrong_num_args ){ - sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", - nId, zId); + sqlite3ErrorMsg(pParse,"wrong number of arguments to function %#T()", + pExpr); pNC->nNcErr++; } #ifndef SQLITE_OMIT_WINDOWFUNC else if( is_agg==0 && ExprHasProperty(pExpr, EP_WinFunc) ){ sqlite3ErrorMsg(pParse, - "FILTER may not be used with non-aggregate %.*s()", - nId, zId + "FILTER may not be used with non-aggregate %#T()", + pExpr ); pNC->nNcErr++; } @@ -100269,7 +101509,7 @@ static int resolveExprStep(Walker *pWalker, Expr 
*pExpr){ #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ Select *pSel = pNC->pWinSelect; - assert( pWin==pExpr->y.pWin ); + assert( pWin==0 || (ExprUseYWin(pExpr) && pWin==pExpr->y.pWin) ); if( IN_RENAME_OBJECT==0 ){ sqlite3WindowUpdate(pParse, pSel ? pSel->pWinDefn : 0, pWin, pDef); if( pParse->db->mallocFailed ) break; @@ -100282,7 +101522,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ }else #endif /* SQLITE_OMIT_WINDOWFUNC */ { - NameContext *pNC2 = pNC; + NameContext *pNC2; /* For looping up thru outer contexts */ pExpr->op = TK_AGG_FUNCTION; pExpr->op2 = 0; #ifndef SQLITE_OMIT_WINDOWFUNC @@ -100290,16 +101530,22 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ sqlite3WalkExpr(pWalker, pExpr->y.pWin->pFilter); } #endif - while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){ + pNC2 = pNC; + while( pNC2 + && sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0 + ){ pExpr->op2++; pNC2 = pNC2->pNext; } assert( pDef!=0 || IN_RENAME_OBJECT ); if( pNC2 && pDef ){ assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg ); + assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg ); testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 ); - pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX); - + testcase( (pDef->funcFlags & SQLITE_FUNC_ANYORDER)!=0 ); + pNC2->ncFlags |= NC_HasAgg + | ((pDef->funcFlags^SQLITE_FUNC_ANYORDER) + & (SQLITE_FUNC_MINMAX|SQLITE_FUNC_ANYORDER)); } } pNC->ncFlags |= savedAllowFlags; @@ -100315,15 +101561,17 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif case TK_IN: { testcase( pExpr->op==TK_IN ); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ int nRef = pNC->nRef; testcase( pNC->ncFlags & NC_IsCheck ); testcase( pNC->ncFlags & NC_PartIdx ); testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); - sqlite3ResolveNotValid(pParse, pNC, "subqueries", - NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol, pExpr); - sqlite3WalkSelect(pWalker, pExpr->x.pSelect); + if( pNC->ncFlags & NC_SelfRef ){ + notValidImpl(pParse, pNC, "subqueries", pExpr, pExpr); + }else{ + sqlite3WalkSelect(pWalker, pExpr->x.pSelect); + } assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); @@ -100338,7 +101586,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); sqlite3ResolveNotValid(pParse, pNC, "parameters", - NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol, pExpr); + NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol, pExpr, pExpr); break; } case TK_IS: @@ -100370,6 +101618,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pExpr->pLeft!=0 ); nLeft = sqlite3ExprVectorSize(pExpr->pLeft); if( pExpr->op==TK_BETWEEN ){ + assert( ExprUseXList(pExpr) ); nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr); if( nRight==nLeft ){ nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr); @@ -100389,11 +101638,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pExpr->op==TK_ISNOT ); testcase( pExpr->op==TK_BETWEEN ); sqlite3ErrorMsg(pParse, "row value misused"); + sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); } break; } } - return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue; + assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); + return pParse->nErr ? 
WRC_Abort : WRC_Continue; } /* @@ -100418,7 +101669,9 @@ static int resolveAsName( UNUSED_PARAMETER(pParse); if( pE->op==TK_ID ){ - char *zCol = pE->u.zToken; + const char *zCol; + assert( !ExprHasProperty(pE, EP_IntValue) ); + zCol = pE->u.zToken; for(i=0; inExpr; i++){ if( pEList->a[i].eEName==ENAME_NAME && sqlite3_stricmp(pEList->a[i].zEName, zCol)==0 @@ -100499,11 +101752,13 @@ static void resolveOutOfRangeError( Parse *pParse, /* The error context into which to write the error */ const char *zType, /* "ORDER" or "GROUP" */ int i, /* The index (1-based) of the term out of range */ - int mx /* Largest permissible value of i */ + int mx, /* Largest permissible value of i */ + Expr *pError /* Associate the error with the expression */ ){ sqlite3ErrorMsg(pParse, "%r %s BY term out of range - should be " "between 1 and %d", i, zType, mx); + sqlite3RecordErrorOffsetOfExpr(pParse->db, pError); } /* @@ -100559,7 +101814,7 @@ static int resolveCompoundOrderBy( if( NEVER(pE==0) ) continue; if( sqlite3ExprIsInteger(pE, &iCol) ){ if( iCol<=0 || iCol>pEList->nExpr ){ - resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); + resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr, pE); return 1; } }else{ @@ -100655,7 +101910,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy( for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ if( pItem->u.x.iOrderByCol ){ if( pItem->u.x.iOrderByCol>pEList->nExpr ){ - resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); + resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr, 0); return 1; } resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,0); @@ -100747,7 +102002,7 @@ static int resolveOrderGroupBy( ** number so that sqlite3ResolveOrderGroupBy() will convert the ** order-by term to a copy of the result-set expression */ if( iCol<1 || iCol>0xffff ){ - resolveOutOfRangeError(pParse, zType, i+1, nResult); + resolveOutOfRangeError(pParse, zType, i+1, nResult, pE2); return 1; } pItem->u.x.iOrderByCol = (u16)iCol; @@ -100805,7 +102060,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ */ if( (p->selFlags & SF_Expanded)==0 ){ sqlite3SelectPrep(pParse, p, pOuterNC); - return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune; + return pParse->nErr ? 
WRC_Abort : WRC_Prune; } isCompound = p->pPrior!=0; @@ -100842,7 +102097,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ p->pOrderBy = 0; } - /* Recursively resolve names in all subqueries + /* Recursively resolve names in all subqueries in the FROM clause */ for(i=0; ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; @@ -100853,7 +102108,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pItem->zName ) pParse->zAuthContext = pItem->zName; sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); pParse->zAuthContext = zSavedContext; - if( pParse->nErr || db->mallocFailed ) return WRC_Abort; + if( pParse->nErr ) return WRC_Abort; + assert( db->mallocFailed==0 ); /* If the number of references to the outer context changed when ** expressions in the sub-select were resolved, the sub-select @@ -100886,7 +102142,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ pGroupBy = p->pGroupBy; if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){ assert( NC_MinMaxAgg==SF_MinMaxAgg ); - p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg); + assert( NC_OrderAgg==SF_OrderByReqd ); + p->selFlags |= SF_Aggregate | (sNC.ncFlags&(NC_MinMaxAgg|NC_OrderAgg)); }else{ sNC.ncFlags &= ~NC_AllowAgg; } @@ -101069,8 +102326,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( Walker w; if( pExpr==0 ) return SQLITE_OK; - savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin); - pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); + savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); + pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; w.xSelectCallback = (pNC->ncFlags & NC_NoSelect) ? 0 : resolveSelectStep; @@ -101113,8 +102370,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( w.xSelectCallback = resolveSelectStep; w.xSelectCallback2 = 0; w.u.pNC = pNC; - savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin); - pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); + savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); + pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); for(i=0; inExpr; i++){ Expr *pExpr = pList->a[i].pExpr; if( pExpr==0 ) continue; @@ -101132,10 +102389,11 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( assert( EP_Win==NC_HasWin ); testcase( pNC->ncFlags & NC_HasAgg ); testcase( pNC->ncFlags & NC_HasWin ); - if( pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin) ){ + if( pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg) ){ ExprSetProperty(pExpr, pNC->ncFlags & (NC_HasAgg|NC_HasWin) ); - savedHasAgg |= pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin); - pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); + savedHasAgg |= pNC->ncFlags & + (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); + pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); } if( w.pParse->nErr>0 ) return WRC_Abort; } @@ -101249,9 +102507,9 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piToFree); /* ** Return the affinity character for a single column of a table. */ -SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table *pTab, int iCol){ - assert( iColnCol ); - return iCol>=0 ? 
pTab->aCol[iCol].affinity : SQLITE_AFF_INTEGER; +SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table *pTab, int iCol){ + if( iCol<0 || NEVER(iCol>=pTab->nCol) ) return SQLITE_AFF_INTEGER; + return pTab->aCol[iCol].affinity; } /* @@ -101281,11 +102539,14 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ } op = pExpr->op; if( op==TK_REGISTER ) op = pExpr->op2; - if( (op==TK_COLUMN || op==TK_AGG_COLUMN) && pExpr->y.pTab ){ - return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); + if( op==TK_COLUMN || op==TK_AGG_COLUMN ){ + assert( ExprUseYTab(pExpr) ); + if( pExpr->y.pTab ){ + return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); + } } if( op==TK_SELECT ){ - assert( pExpr->flags&EP_xIsSelect ); + assert( ExprUseXSelect(pExpr) ); assert( pExpr->x.pSelect!=0 ); assert( pExpr->x.pSelect->pEList!=0 ); assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); @@ -101298,12 +102559,15 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ } #endif if( op==TK_SELECT_COLUMN ){ - assert( pExpr->pLeft->flags&EP_xIsSelect ); + assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) ); + assert( pExpr->iColumn < pExpr->iTable ); + assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr ); return sqlite3ExprAffinity( pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr ); } if( op==TK_VECTOR ){ + assert( ExprUseXList(pExpr) ); return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); } return pExpr->affExpr; @@ -101318,7 +102582,7 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ ** and the pExpr parameter is returned unchanged. */ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( - Parse *pParse, /* Parsing context */ + const Parse *pParse, /* Parsing context */ Expr *pExpr, /* Add the "COLLATE" clause to this expression */ const Token *pCollName, /* Name of collating sequence */ int dequote /* True to dequote pCollName */ @@ -101333,7 +102597,11 @@ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( } return pExpr; } -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, const char *zC){ +SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString( + const Parse *pParse, /* Parsing context */ + Expr *pExpr, /* Add the "COLLATE" clause to this expression */ + const char *zC /* The collating sequence name */ +){ Token s; assert( zC!=0 ); sqlite3TokenInit(&s, (char*)zC); @@ -101359,7 +102627,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr *pExpr){ while( pExpr && ExprHasProperty(pExpr, EP_Skip|EP_Unlikely) ){ if( ExprHasProperty(pExpr, EP_Unlikely) ){ - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( ExprUseXList(pExpr) ); assert( pExpr->x.pList->nExpr>0 ); assert( pExpr->op==TK_FUNCTION ); pExpr = pExpr->x.pList->a[0].pExpr; @@ -101392,27 +102660,30 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ while( p ){ int op = p->op; if( op==TK_REGISTER ) op = p->op2; - if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER) - && p->y.pTab!=0 - ){ - /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally - ** a TK_COLUMN but was previously evaluated and cached in a register */ - int j = p->iColumn; - if( j>=0 ){ - const char *zColl = p->y.pTab->aCol[j].zColl; - pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); + if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){ + assert( ExprUseYTab(p) ); + if( p->y.pTab!=0 ){ + /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally + ** a TK_COLUMN but 
was previously evaluated and cached in a register */ + int j = p->iColumn; + if( j>=0 ){ + const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]); + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); + } + break; } - break; } if( op==TK_CAST || op==TK_UPLUS ){ p = p->pLeft; continue; } if( op==TK_VECTOR ){ + assert( ExprUseXList(p) ); p = p->x.pList->a[0].pExpr; continue; } if( op==TK_COLLATE ){ + assert( !ExprHasProperty(p, EP_IntValue) ); pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken); break; } @@ -101422,11 +102693,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ }else{ Expr *pNext = p->pRight; /* The Expr.x union is never used at the same time as Expr.pRight */ + assert( ExprUseXList(p) ); assert( p->x.pList==0 || p->pRight==0 ); - if( p->x.pList!=0 - && !db->mallocFailed - && ALWAYS(!ExprHasProperty(p, EP_xIsSelect)) - ){ + if( p->x.pList!=0 && !db->mallocFailed ){ int i; for(i=0; ALWAYS(ix.pList->nExpr); i++){ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ @@ -101509,7 +102778,7 @@ static char comparisonAffinity(const Expr *pExpr){ aff = sqlite3ExprAffinity(pExpr->pLeft); if( pExpr->pRight ){ aff = sqlite3CompareAffinity(pExpr->pRight, aff); - }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + }else if( ExprUseXSelect(pExpr) ){ aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); }else if( aff==0 ){ aff = SQLITE_AFF_BLOB; @@ -101635,7 +102904,7 @@ static int codeCompare( ** But a TK_SELECT might be either a vector or a scalar. It is only ** considered a vector if it has two or more result columns. */ -SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr){ +SQLITE_PRIVATE int sqlite3ExprIsVector(const Expr *pExpr){ return sqlite3ExprVectorSize(pExpr)>1; } @@ -101645,12 +102914,14 @@ SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr){ ** is a sub-select, return the number of columns in the sub-select. For ** any other type of expression, return 1. */ -SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr){ +SQLITE_PRIVATE int sqlite3ExprVectorSize(const Expr *pExpr){ u8 op = pExpr->op; if( op==TK_REGISTER ) op = pExpr->op2; if( op==TK_VECTOR ){ + assert( ExprUseXList(pExpr) ); return pExpr->x.pList->nExpr; }else if( op==TK_SELECT ){ + assert( ExprUseXSelect(pExpr) ); return pExpr->x.pSelect->pEList->nExpr; }else{ return 1; @@ -101677,8 +102948,10 @@ SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){ if( sqlite3ExprIsVector(pVector) ){ assert( pVector->op2==0 || pVector->op==TK_REGISTER ); if( pVector->op==TK_SELECT || pVector->op2==TK_SELECT ){ + assert( ExprUseXSelect(pVector) ); return pVector->x.pSelect->pEList->a[i].pExpr; }else{ + assert( ExprUseXList(pVector) ); return pVector->x.pList->a[i].pExpr; } } @@ -101709,11 +102982,12 @@ SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( Parse *pParse, /* Parsing context */ Expr *pVector, /* The vector. List of expressions or a sub-SELECT */ - int iField /* Which column of the vector to return */ + int iField, /* Which column of the vector to return */ + int nField /* Total number of columns in the vector */ ){ Expr *pRet; if( pVector->op==TK_SELECT ){ - assert( pVector->flags & EP_xIsSelect ); + assert( ExprUseXSelect(pVector) ); /* The TK_SELECT_COLUMN Expr node: ** ** pLeft: pVector containing TK_SELECT. Not deleted. 
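Several of the hunks above (sqlite3ExprIsVector, sqlite3ExprVectorSize, sqlite3VectorFieldSubexpr) hinge on how wide a "vector" expression is: a row value spans its list of terms, a scalar subquery spans its result columns, and anything else counts as a single column. The sketch below restates that rule with a hypothetical Node type as a stand-in for SQLite's Expr; it is illustrative only and not part of the patch.

#include <stddef.h>

enum Tag { TAG_SCALAR, TAG_VECTOR, TAG_SELECT };

typedef struct Node Node;
struct Node {
  enum Tag tag;
  int nList;       /* number of terms when tag==TAG_VECTOR, e.g. (a,b,c) -> 3 */
  int nResultCol;  /* number of result columns when tag==TAG_SELECT */
};

/* Width of an expression for row-value purposes. */
static int vectorSize(const Node *p){
  if( p==NULL ) return 1;
  if( p->tag==TAG_VECTOR ) return p->nList;
  if( p->tag==TAG_SELECT ) return p->nResultCol;
  return 1;                 /* any ordinary scalar expression */
}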
@@ -101732,14 +103006,23 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( */ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0); if( pRet ){ + pRet->iTable = nField; pRet->iColumn = iField; pRet->pLeft = pVector; } - assert( pRet==0 || pRet->iTable==0 ); }else{ - if( pVector->op==TK_VECTOR ) pVector = pVector->x.pList->a[iField].pExpr; + if( pVector->op==TK_VECTOR ){ + Expr **ppVector; + assert( ExprUseXList(pVector) ); + ppVector = &pVector->x.pList->a[iField].pExpr; + pVector = *ppVector; + if( IN_RENAME_OBJECT ){ + /* This must be a vector UPDATE inside a trigger */ + *ppVector = 0; + return pVector; + } + } pRet = sqlite3ExprDup(pParse->db, pVector, 0); - sqlite3RenameTokenRemap(pParse, pRet, pVector); } return pRet; } @@ -101795,10 +103078,12 @@ static int exprVectorRegister( return pVector->iTable+iField; } if( op==TK_SELECT ){ + assert( ExprUseXSelect(pVector) ); *ppExpr = pVector->x.pSelect->pEList->a[iField].pExpr; return regSelect+iField; } if( op==TK_VECTOR ){ + assert( ExprUseXList(pVector) ); *ppExpr = pVector->x.pList->a[iField].pExpr; return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree); } @@ -101932,14 +103217,14 @@ SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){ ** to by pnHeight, the second parameter, then set *pnHeight to that ** value. */ -static void heightOfExpr(Expr *p, int *pnHeight){ +static void heightOfExpr(const Expr *p, int *pnHeight){ if( p ){ if( p->nHeight>*pnHeight ){ *pnHeight = p->nHeight; } } } -static void heightOfExprList(ExprList *p, int *pnHeight){ +static void heightOfExprList(const ExprList *p, int *pnHeight){ if( p ){ int i; for(i=0; inExpr; i++){ @@ -101947,8 +103232,8 @@ static void heightOfExprList(ExprList *p, int *pnHeight){ } } } -static void heightOfSelect(Select *pSelect, int *pnHeight){ - Select *p; +static void heightOfSelect(const Select *pSelect, int *pnHeight){ + const Select *p; for(p=pSelect; p; p=p->pPrior){ heightOfExpr(p->pWhere, pnHeight); heightOfExpr(p->pHaving, pnHeight); @@ -101970,10 +103255,9 @@ static void heightOfSelect(Select *pSelect, int *pnHeight){ ** if appropriate. */ static void exprSetHeight(Expr *p){ - int nHeight = 0; - heightOfExpr(p->pLeft, &nHeight); - heightOfExpr(p->pRight, &nHeight); - if( ExprHasProperty(p, EP_xIsSelect) ){ + int nHeight = p->pLeft ? p->pLeft->nHeight : 0; + if( p->pRight && p->pRight->nHeight>nHeight ) nHeight = p->pRight->nHeight; + if( ExprUseXSelect(p) ){ heightOfSelect(p->x.pSelect, &nHeight); }else if( p->x.pList ){ heightOfExprList(p->x.pList, &nHeight); @@ -102000,7 +103284,7 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ ** Return the maximum height of any expression tree referenced ** by the select statement passed as an argument. */ -SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ +SQLITE_PRIVATE int sqlite3SelectExprHeight(const Select *p){ int nHeight = 0; heightOfSelect(p, &nHeight); return nHeight; @@ -102012,7 +103296,7 @@ SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ */ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ if( pParse->nErr ) return; - if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){ + if( p && ExprUseXList(p) && p->x.pList ){ p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); } } @@ -102170,6 +103454,63 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse *pParse, Expr *pExpr, Select *pS } } +/* +** Expression list pEList is a list of vector values. This function +** converts the contents of pEList to a VALUES(...) 
Select statement +** returning 1 row for each element of the list. For example, the +** expression list: +** +** ( (1,2), (3,4) (5,6) ) +** +** is translated to the equivalent of: +** +** VALUES(1,2), (3,4), (5,6) +** +** Each of the vector values in pEList must contain exactly nElem terms. +** If a list element that is not a vector or does not contain nElem terms, +** an error message is left in pParse. +** +** This is used as part of processing IN(...) expressions with a list +** of vectors on the RHS. e.g. "... IN ((1,2), (3,4), (5,6))". +*/ +SQLITE_PRIVATE Select *sqlite3ExprListToValues(Parse *pParse, int nElem, ExprList *pEList){ + int ii; + Select *pRet = 0; + assert( nElem>1 ); + for(ii=0; iinExpr; ii++){ + Select *pSel; + Expr *pExpr = pEList->a[ii].pExpr; + int nExprElem; + if( pExpr->op==TK_VECTOR ){ + assert( ExprUseXList(pExpr) ); + nExprElem = pExpr->x.pList->nExpr; + }else{ + nExprElem = 1; + } + if( nExprElem!=nElem ){ + sqlite3ErrorMsg(pParse, "IN(...) element has %d term%s - expected %d", + nExprElem, nExprElem>1?"s":"", nElem + ); + break; + } + assert( ExprUseXList(pExpr) ); + pSel = sqlite3SelectNew(pParse, pExpr->x.pList, 0, 0, 0, 0, 0, SF_Values,0); + pExpr->x.pList = 0; + if( pSel ){ + if( pRet ){ + pSel->op = TK_ALL; + pSel->pPrior = pRet; + } + pRet = pSel; + } + } + + if( pRet && pRet->pPrior ){ + pRet->selFlags |= SF_MultiValue; + } + sqlite3ExprListDelete(pParse->db, pEList); + return pRet; +} /* ** Join two expressions using an AND operator. If either expression is @@ -102203,7 +103544,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ SQLITE_PRIVATE Expr *sqlite3ExprFunction( Parse *pParse, /* Parsing context */ ExprList *pList, /* Argument list */ - Token *pToken, /* Name of the function */ + const Token *pToken, /* Name of the function */ int eDistinct /* SF_Distinct or SF_ALL or 0 */ ){ Expr *pNew; @@ -102214,12 +103555,16 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction( sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */ return 0; } - if( pList && pList->nExpr > pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ + pNew->w.iOfst = (int)(pToken->z - pParse->zTail); + if( pList + && pList->nExpr > pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] + && !pParse->nested + ){ sqlite3ErrorMsg(pParse, "too many arguments on function %T", pToken); } pNew->x.pList = pList; ExprSetProperty(pNew, EP_HasFunc); - assert( !ExprHasProperty(pNew, EP_xIsSelect) ); + assert( ExprUseXList(pNew) ); sqlite3ExprSetHeightAndFlags(pParse, pNew); if( eDistinct==SF_Distinct ) ExprSetProperty(pNew, EP_Distinct); return pNew; @@ -102238,8 +103583,8 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction( */ SQLITE_PRIVATE void sqlite3ExprFunctionUsable( Parse *pParse, /* Parsing and code generating context */ - Expr *pExpr, /* The function invocation */ - FuncDef *pDef /* The function being invoked */ + const Expr *pExpr, /* The function invocation */ + const FuncDef *pDef /* The function being invoked */ ){ assert( !IN_RENAME_OBJECT ); assert( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0 ); @@ -102254,7 +103599,7 @@ SQLITE_PRIVATE void sqlite3ExprFunctionUsable( ** SQLITE_DBCONFIG_TRUSTED_SCHEMA is off (meaning ** that the schema is possibly tainted). 
*/ - sqlite3ErrorMsg(pParse, "unsafe use of %s()", pDef->zName); + sqlite3ErrorMsg(pParse, "unsafe use of %#T()", pExpr); } } } @@ -102310,6 +103655,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); + sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); return; } x = (ynVar)i; @@ -102337,6 +103683,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n pExpr->iColumn = x; if( x>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "too many SQL variables"); + sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); } } @@ -102345,27 +103692,26 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n */ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ assert( p!=0 ); - /* Sanity check: Assert that the IntValue is non-negative if it exists */ - assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 ); - - assert( !ExprHasProperty(p, EP_WinFunc) || p->y.pWin!=0 || db->mallocFailed ); - assert( p->op!=TK_FUNCTION || ExprHasProperty(p, EP_TokenOnly|EP_Reduced) - || p->y.pWin==0 || ExprHasProperty(p, EP_WinFunc) ); + assert( !ExprUseUValue(p) || p->u.iValue>=0 ); + assert( !ExprUseYWin(p) || !ExprUseYSub(p) ); + assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed ); + assert( p->op!=TK_FUNCTION || !ExprUseYSub(p) ); #ifdef SQLITE_DEBUG if( ExprHasProperty(p, EP_Leaf) && !ExprHasProperty(p, EP_TokenOnly) ){ assert( p->pLeft==0 ); assert( p->pRight==0 ); - assert( p->x.pSelect==0 ); + assert( !ExprUseXSelect(p) || p->x.pSelect==0 ); + assert( !ExprUseXList(p) || p->x.pList==0 ); } #endif if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){ /* The Expr.x union is never used at the same time as Expr.pRight */ - assert( p->x.pList==0 || p->pRight==0 ); + assert( (ExprUseXList(p) && p->x.pList==0) || p->pRight==0 ); if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft); if( p->pRight ){ assert( !ExprHasProperty(p, EP_WinFunc) ); sqlite3ExprDeleteNN(db, p->pRight); - }else if( ExprHasProperty(p, EP_xIsSelect) ){ + }else if( ExprUseXSelect(p) ){ assert( !ExprHasProperty(p, EP_WinFunc) ); sqlite3SelectDelete(db, p->x.pSelect); }else{ @@ -102377,7 +103723,10 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ #endif } } - if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken); + if( ExprHasProperty(p, EP_MemToken) ){ + assert( !ExprHasProperty(p, EP_IntValue) ); + sqlite3DbFree(db, p->u.zToken); + } if( !ExprHasProperty(p, EP_Static) ){ sqlite3DbFreeNN(db, p); } @@ -102419,7 +103768,7 @@ SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse *pParse, Expr *p){ ** passed as the first argument. This is always one of EXPR_FULLSIZE, ** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE. */ -static int exprStructSize(Expr *p){ +static int exprStructSize(const Expr *p){ if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE; if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE; return EXPR_FULLSIZE; @@ -102459,7 +103808,7 @@ static int exprStructSize(Expr *p){ ** of dupedExprStructSize() contain multiple assert() statements that attempt ** to enforce this constraint. 
*/ -static int dupedExprStructSize(Expr *p, int flags){ +static int dupedExprStructSize(const Expr *p, int flags){ int nSize; assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); @@ -102490,7 +103839,7 @@ static int dupedExprStructSize(Expr *p, int flags){ ** of the Expr structure and a copy of the Expr.u.zToken string (if that ** string is defined.) */ -static int dupedExprNodeSize(Expr *p, int flags){ +static int dupedExprNodeSize(const Expr *p, int flags){ int nByte = dupedExprStructSize(p, flags) & 0xfff; if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ nByte += sqlite3Strlen30NN(p->u.zToken)+1; @@ -102511,7 +103860,7 @@ static int dupedExprNodeSize(Expr *p, int flags){ ** and Expr.pRight variables (but not for any structures pointed to or ** descended from the Expr.x.pList or Expr.x.pSelect variables). */ -static int dupedExprSize(Expr *p, int flags){ +static int dupedExprSize(const Expr *p, int flags){ int nByte = 0; if( p ){ nByte = dupedExprNodeSize(p, flags); @@ -102530,7 +103879,7 @@ static int dupedExprSize(Expr *p, int flags){ ** if any. Before returning, *pzBuffer is set to the first byte past the ** portion of the buffer copied into by this function. */ -static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ +static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ Expr *pNew; /* Value to return */ u8 *zAlloc; /* Memory space from which to build Expr object */ u32 staticFlag; /* EP_Static if space not obtained from malloc */ @@ -102593,7 +103942,7 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){ /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ - if( ExprHasProperty(p, EP_xIsSelect) ){ + if( ExprUseXSelect(p) ){ pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags); }else{ pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags); @@ -102622,7 +103971,6 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ if( pNew->op==TK_SELECT_COLUMN ){ pNew->pLeft = p->pLeft; - assert( p->iColumn==0 || p->pRight==0 ); assert( p->pRight==0 || p->pRight==p->pLeft || ExprHasProperty(p->pLeft, EP_Subquery) ); }else{ @@ -102712,15 +104060,17 @@ static void gatherSelectWindows(Select *p){ ** truncated version of the usual Expr structure that will be stored as ** part of the in-memory representation of the database schema. */ -SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){ +SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, const Expr *p, int flags){ assert( flags==0 || flags==EXPRDUP_REDUCE ); return p ? 
exprDup(db, p, flags, 0) : 0; } -SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){ +SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int flags){ ExprList *pNew; - struct ExprList_item *pItem, *pOldItem; + struct ExprList_item *pItem; + const struct ExprList_item *pOldItem; int i; - Expr *pPriorSelectCol = 0; + Expr *pPriorSelectColOld = 0; + Expr *pPriorSelectColNew = 0; assert( db!=0 ); if( p==0 ) return 0; pNew = sqlite3DbMallocRawNN(db, sqlite3DbMallocSize(db, p)); @@ -102737,17 +104087,17 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags) && pOldExpr->op==TK_SELECT_COLUMN && (pNewExpr = pItem->pExpr)!=0 ){ - assert( pNewExpr->iColumn==0 || i>0 ); - if( pNewExpr->iColumn==0 ){ - assert( pOldExpr->pLeft==pOldExpr->pRight - || ExprHasProperty(pOldExpr->pLeft, EP_Subquery) ); - pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight; + if( pNewExpr->pRight ){ + pPriorSelectColOld = pOldExpr->pRight; + pPriorSelectColNew = pNewExpr->pRight; + pNewExpr->pLeft = pNewExpr->pRight; }else{ - assert( i>0 ); - assert( pItem[-1].pExpr!=0 ); - assert( pNewExpr->iColumn==pItem[-1].pExpr->iColumn+1 ); - assert( pPriorSelectCol==pItem[-1].pExpr->pLeft ); - pNewExpr->pLeft = pPriorSelectCol; + if( pOldExpr->pLeft!=pPriorSelectColOld ){ + pPriorSelectColOld = pOldExpr->pLeft; + pPriorSelectColNew = sqlite3ExprDup(db, pPriorSelectColOld, flags); + pNewExpr->pRight = pPriorSelectColNew; + } + pNewExpr->pLeft = pPriorSelectColNew; } } pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName); @@ -102769,7 +104119,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags) */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \ || !defined(SQLITE_OMIT_SUBQUERY) -SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ +SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int flags){ SrcList *pNew; int i; int nByte; @@ -102781,7 +104131,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; inSrc; i++){ SrcItem *pNewItem = &pNew->a[i]; - SrcItem *pOldItem = &p->a[i]; + const SrcItem *pOldItem = &p->a[i]; Table *pTab; pNewItem->pSchema = pOldItem->pSchema; pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); @@ -102813,7 +104163,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ } return pNew; } -SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ +SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){ IdList *pNew; int i; assert( db!=0 ); @@ -102837,11 +104187,11 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ } return pNew; } -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){ +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int flags){ Select *pRet = 0; Select *pNext = 0; Select **pp = &pRet; - Select *p; + const Select *p; assert( db!=0 ); for(p=pDup; p; p=p->pPrior){ @@ -102886,7 +104236,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){ return pRet; } #else -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *p, int flags){ assert( p==0 ); return 0; } @@ -103006,11 +104356,9 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector( } for(i=0; inId; i++){ - Expr *pSubExpr = 
sqlite3ExprForVectorField(pParse, pExpr, i); + Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i, pColumns->nId); assert( pSubExpr!=0 || db->mallocFailed ); - assert( pSubExpr==0 || pSubExpr->iTable==0 ); if( pSubExpr==0 ) continue; - pSubExpr->iTable = pColumns->nId; pList = sqlite3ExprListAppend(pParse, pList, pSubExpr); if( pList ){ assert( pList->nExpr==iFirst+i+1 ); @@ -103084,7 +104432,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder, int SQLITE_PRIVATE void sqlite3ExprListSetName( Parse *pParse, /* Parsing context */ ExprList *pList, /* List to which to add the span. */ - Token *pName, /* Name to be added */ + const Token *pName, /* Name to be added */ int dequote /* True to cause the name to be dequoted */ ){ assert( pList!=0 || pParse->db->mallocFailed!=0 ); @@ -103102,7 +104450,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetName( ** to the token-map. */ sqlite3Dequote(pItem->zEName); if( IN_RENAME_OBJECT ){ - sqlite3RenameTokenMap(pParse, (void*)pItem->zEName, pName); + sqlite3RenameTokenMap(pParse, (const void*)pItem->zEName, pName); } } } @@ -103221,7 +104569,7 @@ SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char *zIn){ SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){ u32 v; assert( pExpr->op==TK_ID || pExpr->op==TK_STRING ); - if( !ExprHasProperty(pExpr, EP_Quoted) + if( !ExprHasProperty(pExpr, EP_Quoted|EP_IntValue) && (v = sqlite3IsTrueOrFalse(pExpr->u.zToken))!=0 ){ pExpr->op = TK_TRUEFALSE; @@ -103238,6 +104586,7 @@ SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){ SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){ pExpr = sqlite3ExprSkipCollate((Expr*)pExpr); assert( pExpr->op==TK_TRUEFALSE ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0 || sqlite3StrICmp(pExpr->u.zToken,"false")==0 ); return pExpr->u.zToken[4]==0; @@ -103421,6 +104770,38 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ return exprIsConst(p, 3, iCur); } +/* +** Check pExpr to see if it is an invariant constraint on data source pSrc. +** This is an optimization. False negatives will perhaps cause slower +** queries, but false positives will yield incorrect answers. So when in +** double, return 0. +** +** To be an invariant constraint, the following must be true: +** +** (1) pExpr cannot refer to any table other than pSrc->iCursor. +** +** (2) pExpr cannot use subqueries or non-deterministic functions. +** +** (*) ** Not applicable to this branch ** +** +** (4) If pSrc is the right operand of a LEFT JOIN, then... +** (4a) pExpr must come from an ON clause.. +** (4b) and specifically the ON clause associated with the LEFT JOIN. +** +** (5) If pSrc is not the right operand of a LEFT JOIN or the left +** operand of a RIGHT JOIN, then pExpr must be from the WHERE +** clause, not an ON clause. +*/ +SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc){ + if( pSrc->fg.jointype & JT_LEFT ){ + if( !ExprHasProperty(pExpr, EP_FromJoin) ) return 0; /* rule (4a) */ + if( pExpr->w.iRightJoinTable!=pSrc->iCursor ) return 0; /* rule (4b) */ + }else{ + if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0; /* rule (5) */ + } + return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ +} + /* ** sqlite3WalkExpr() callback used by sqlite3ExprIsConstantOrGroupBy(). @@ -103442,7 +104823,7 @@ static int exprNodeIsConstantOrGroupBy(Walker *pWalker, Expr *pExpr){ } /* Check if pExpr is a sub-select. If so, consider it variable. 
*/ - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ pWalker->eCode = 0; return WRC_Abort; } @@ -103530,7 +104911,7 @@ SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr *p){ ** in *pValue. If the expression is not an integer or if it is too big ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. */ -SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){ +SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue){ int rc = 0; if( NEVER(p==0) ) return 0; /* Used to only happen following on OOM */ @@ -103549,9 +104930,9 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){ break; } case TK_UMINUS: { - int v; + int v = 0; if( sqlite3ExprIsInteger(p->pLeft, &v) ){ - assert( v!=(-2147483647-1) ); + assert( ((unsigned int)v)!=0x80000000 ); *pValue = -v; rc = 1; } @@ -103592,10 +104973,11 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ case TK_BLOB: return 0; case TK_COLUMN: + assert( ExprUseYTab(p) ); return ExprHasProperty(p, EP_CanBeNull) || p->y.pTab==0 || /* Reference to column of index on expression */ (p->iColumn>=0 - && ALWAYS(p->y.pTab->aCol!=0) /* Defense against OOM problems */ + && p->y.pTab->aCol!=0 /* Possible due to prior error */ && p->y.pTab->aCol[p->iColumn].notNull==0); default: return 1; @@ -103663,13 +105045,13 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ ** table, then return NULL. */ #ifndef SQLITE_OMIT_SUBQUERY -static Select *isCandidateForInOpt(Expr *pX){ +static Select *isCandidateForInOpt(const Expr *pX){ Select *p; SrcList *pSrc; ExprList *pEList; Table *pTab; int i; - if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0; /* Not a subquery */ + if( !ExprUseXSelect(pX) ) return 0; /* Not a subquery */ if( ExprHasProperty(pX, EP_VarSelect) ) return 0; /* Correlated subq */ p = pX->x.pSelect; if( p->pPrior ) return 0; /* Not a compound SELECT */ @@ -103687,7 +105069,7 @@ static Select *isCandidateForInOpt(Expr *pX){ if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ pTab = pSrc->a[0].pTab; assert( pTab!=0 ); - assert( pTab->pSelect==0 ); /* FROM clause is not a view */ + assert( !IsView(pTab) ); /* FROM clause is not a view */ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ pEList = p->pEList; assert( pEList!=0 ); @@ -103840,7 +105222,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( ** or not NULL is actually possible (it may not be, for example, due ** to NOT NULL constraints in the schema). If no NULL values are possible, ** set prRhsHasNull to 0 before continuing. */ - if( prRhsHasNull && (pX->flags & EP_xIsSelect) ){ + if( prRhsHasNull && ExprUseXSelect(pX) ){ int i; ExprList *pEList = pX->x.pSelect->pEList; for(i=0; inExpr; i++){ @@ -103996,7 +105378,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( */ if( eType==0 && (inFlags & IN_INDEX_NOOP_OK) - && !ExprHasProperty(pX, EP_xIsSelect) + && ExprUseXList(pX) && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) ){ eType = IN_INDEX_NOOP; @@ -104041,10 +105423,10 @@ SQLITE_PRIVATE int sqlite3FindInIndex( ** It is the responsibility of the caller to ensure that the returned ** string is eventually freed using sqlite3DbFree(). */ -static char *exprINAffinity(Parse *pParse, Expr *pExpr){ +static char *exprINAffinity(Parse *pParse, const Expr *pExpr){ Expr *pLeft = pExpr->pLeft; int nVal = sqlite3ExprVectorSize(pLeft); - Select *pSelect = (pExpr->flags & EP_xIsSelect) ? pExpr->x.pSelect : 0; + Select *pSelect = ExprUseXSelect(pExpr) ? 
pExpr->x.pSelect : 0; char *zRet; assert( pExpr->op==TK_IN ); @@ -104094,7 +105476,7 @@ SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpec */ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){ #ifndef SQLITE_OMIT_SUBQUERY - if( pExpr->flags & EP_xIsSelect ){ + if( ExprUseXSelect(pExpr) ){ sqlite3SubselectError(pParse, pExpr->x.pSelect->pEList->nExpr, 1); }else #endif @@ -104158,10 +105540,11 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( */ if( ExprHasProperty(pExpr, EP_Subrtn) ){ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ ExplainQueryPlan((pParse, 0, "REUSE LIST SUBQUERY %d", pExpr->x.pSelect->selId)); } + assert( ExprUseYSub(pExpr) ); sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn, pExpr->y.sub.iAddr); sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable); @@ -104170,6 +105553,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( } /* Begin coding the subroutine */ + assert( !ExprUseYWin(pExpr) ); ExprSetProperty(pExpr, EP_Subrtn); assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); pExpr->y.sub.regReturn = ++pParse->nMem; @@ -104190,7 +105574,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( pExpr->iTable = iTab; addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, nVal); #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ VdbeComment((v, "Result of SELECT %u", pExpr->x.pSelect->selId)); }else{ VdbeComment((v, "RHS of IN operator")); @@ -104198,7 +105582,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( #endif pKeyInfo = sqlite3KeyInfoAlloc(pParse->db, nVal, 1); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ /* Case 1: expr IN (SELECT ...) ** ** Generate code to write the results of the select into the temporary @@ -104296,6 +105680,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( if( addrOnce ){ sqlite3VdbeJumpHere(v, addrOnce); /* Subroutine return */ + assert( ExprUseYSub(pExpr) ); sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn); sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1); sqlite3ClearTempRegCache(pParse); @@ -104332,19 +105717,22 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ testcase( pExpr->op==TK_EXISTS ); testcase( pExpr->op==TK_SELECT ); assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); - assert( ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( ExprUseXSelect(pExpr) ); pSel = pExpr->x.pSelect; /* If this routine has already been coded, then invoke it as a ** subroutine. 
*/ if( ExprHasProperty(pExpr, EP_Subrtn) ){ ExplainQueryPlan((pParse, 0, "REUSE SUBQUERY %d", pSel->selId)); + assert( ExprUseYSub(pExpr) ); sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn, pExpr->y.sub.iAddr); return pExpr->iTable; } /* Begin coding the subroutine */ + assert( !ExprUseYWin(pExpr) ); + assert( !ExprHasProperty(pExpr, EP_Reduced|EP_TokenOnly) ); ExprSetProperty(pExpr, EP_Subrtn); pExpr->y.sub.regReturn = ++pParse->nMem; pExpr->y.sub.iAddr = @@ -104411,10 +105799,8 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ } pSel->iLimit = 0; if( sqlite3Select(pParse, pSel, &dest) ){ - if( pParse->nErr ){ - pExpr->op2 = pExpr->op; - pExpr->op = TK_ERROR; - } + pExpr->op2 = pExpr->op; + pExpr->op = TK_ERROR; return 0; } pExpr->iTable = rReg = dest.iSDParm; @@ -104424,6 +105810,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ } /* Subroutine return */ + assert( ExprUseYSub(pExpr) ); sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn); sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1); sqlite3ClearTempRegCache(pParse); @@ -104440,7 +105827,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ */ SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){ int nVector = sqlite3ExprVectorSize(pIn->pLeft); - if( (pIn->flags & EP_xIsSelect)!=0 && !pParse->db->mallocFailed ){ + if( ExprUseXSelect(pIn) && !pParse->db->mallocFailed ){ if( nVector!=pIn->x.pSelect->pEList->nExpr ){ sqlite3SubselectError(pParse, pIn->x.pSelect->pEList->nExpr, nVector); return 1; @@ -104574,13 +105961,15 @@ static void sqlite3ExprCodeIN( ** This is step (1) in the in-operator.md optimized algorithm. */ if( eType==IN_INDEX_NOOP ){ - ExprList *pList = pExpr->x.pList; - CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); + ExprList *pList; + CollSeq *pColl; int labelOk = sqlite3VdbeMakeLabel(pParse); int r2, regToFree; int regCkNull = 0; int ii; - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( ExprUseXList(pExpr) ); + pList = pExpr->x.pList; + pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); if( destIfNull!=destIfFalse ){ regCkNull = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_BitAnd, rLhs, rLhs, regCkNull); @@ -104628,10 +106017,9 @@ static void sqlite3ExprCodeIN( }else{ destStep2 = destStep6 = sqlite3VdbeMakeLabel(pParse); } - if( pParse->nErr ) goto sqlite3ExprCodeIN_finished; for(i=0; ipLeft, i); - if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; + if( pParse->nErr ) goto sqlite3ExprCodeIN_oom_error; if( sqlite3ExprCanBeNull(p) ){ sqlite3VdbeAddOp2(v, OP_IsNull, rLhs+i, destStep2); VdbeCoverage(v); @@ -104769,11 +106157,12 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){ c = sqlite3DecOrHexToI64(z, &value); if( (c==3 && !negFlag) || (c==2) || (negFlag && value==SMALLEST_INT64)){ #ifdef SQLITE_OMIT_FLOATING_POINT - sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? 
"-" : "", z); + sqlite3ErrorMsg(pParse, "oversized integer: %s%#T", negFlag?"-":"",pExpr); #else #ifndef SQLITE_OMIT_HEX_INTEGER if( sqlite3_strnicmp(z,"0x",2)==0 ){ - sqlite3ErrorMsg(pParse, "hex literal too big: %s%s", negFlag?"-":"",z); + sqlite3ErrorMsg(pParse, "hex literal too big: %s%#T", + negFlag?"-":"",pExpr); }else #endif { @@ -104817,9 +106206,10 @@ SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn( ** and store the result in register regOut */ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn( - Parse *pParse, - Column *pCol, - int regOut + Parse *pParse, /* Parsing context */ + Table *pTab, /* Table containing the generated column */ + Column *pCol, /* The generated column */ + int regOut /* Put the result in this register */ ){ int iAddr; Vdbe *v = pParse->pVdbe; @@ -104830,7 +106220,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn( }else{ iAddr = 0; } - sqlite3ExprCodeCopy(pParse, pCol->pDflt, regOut); + sqlite3ExprCodeCopy(pParse, sqlite3ColumnExpr(pTab,pCol), regOut); if( pCol->affinity>=SQLITE_AFF_TEXT ){ sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1); } @@ -104866,12 +106256,13 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable( }else if( (pCol = &pTab->aCol[iCol])->colFlags & COLFLAG_VIRTUAL ){ Parse *pParse = sqlite3VdbeParser(v); if( pCol->colFlags & COLFLAG_BUSY ){ - sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pCol->zName); + sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", + pCol->zCnName); }else{ int savedSelfTab = pParse->iSelfTab; pCol->colFlags |= COLFLAG_BUSY; pParse->iSelfTab = iTabCur+1; - sqlite3ExprCodeGeneratedColumn(pParse, pCol, regOut); + sqlite3ExprCodeGeneratedColumn(pParse, pTab, pCol, regOut); pParse->iSelfTab = savedSelfTab; pCol->colFlags &= ~COLFLAG_BUSY; } @@ -104964,6 +106355,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){ int i; iResult = pParse->nMem+1; pParse->nMem += nResult; + assert( ExprUseXList(p) ); for(i=0; ix.pList->a[i].pExpr, i+iResult); } @@ -105038,6 +106430,7 @@ static int exprCodeInlineFunction( ** Test-only SQL functions that are only usable if enabled ** via SQLITE_TESTCTRL_INTERNAL_FUNCTIONS */ +#if !defined(SQLITE_UNTESTABLE) case INLINEFUNC_expr_compare: { /* Compare two expressions using sqlite3ExprCompare() */ assert( nFarg==2 ); @@ -105071,7 +106464,6 @@ static int exprCodeInlineFunction( break; } -#ifdef SQLITE_DEBUG case INLINEFUNC_affinity: { /* The AFFINITY() function evaluates to a string that describes ** the type affinity of the argument. This is used for testing of @@ -105085,7 +106477,7 @@ static int exprCodeInlineFunction( (aff<=SQLITE_AFF_NONE) ? 
"none" : azAff[aff-SQLITE_AFF_BLOB]); break; } -#endif +#endif /* !defined(SQLITE_UNTESTABLE) */ } return target; } @@ -105139,7 +106531,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( pCol->iColumn<0 ){ VdbeComment((v,"%s.rowid",pTab->zName)); }else{ - VdbeComment((v,"%s.%s",pTab->zName,pTab->aCol[pCol->iColumn].zName)); + VdbeComment((v,"%s.%s", + pTab->zName, pTab->aCol[pCol->iColumn].zCnName)); if( pTab->aCol[pCol->iColumn].affinity==SQLITE_AFF_REAL ){ sqlite3VdbeAddOp1(v, OP_RealAffinity, target); } @@ -105161,6 +106554,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) */ int aff; iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target); + assert( ExprUseYTab(pExpr) ); if( pExpr->y.pTab ){ aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); }else{ @@ -105184,9 +106578,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** immediately prior to the first column. */ Column *pCol; - Table *pTab = pExpr->y.pTab; + Table *pTab; int iSrc; int iCol = pExpr->iColumn; + assert( ExprUseYTab(pExpr) ); + pTab = pExpr->y.pTab; assert( pTab!=0 ); assert( iCol>=XN_ROWID ); assert( iColnCol ); @@ -105200,12 +106596,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( pCol->colFlags & COLFLAG_GENERATED ){ if( pCol->colFlags & COLFLAG_BUSY ){ sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", - pCol->zName); + pCol->zCnName); return 0; } pCol->colFlags |= COLFLAG_BUSY; if( pCol->colFlags & COLFLAG_NOTAVAIL ){ - sqlite3ExprCodeGeneratedColumn(pParse, pCol, iSrc); + sqlite3ExprCodeGeneratedColumn(pParse, pTab, pCol, iSrc); } pCol->colFlags &= ~(COLFLAG_BUSY|COLFLAG_NOTAVAIL); return iSrc; @@ -105224,6 +106620,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) iTab = pParse->iSelfTab - 1; } } + assert( ExprUseYTab(pExpr) ); iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab, pExpr->iColumn, iTab, target, pExpr->op2); @@ -105301,6 +106698,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); inReg = target; } + assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3VdbeAddOp2(v, OP_Cast, target, sqlite3AffinityType(pExpr->u.zToken, 0)); return inReg; @@ -105440,7 +106838,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) || NEVER(pExpr->iAgg>=pInfo->nFunc) ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); - sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken); + sqlite3ErrorMsg(pParse, "misuse of aggregate: %#T()", pExpr); }else{ return pInfo->aFunc[pExpr->iAgg].iMem; } @@ -105468,8 +106866,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** multiple times if we know they always give the same result */ return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); } - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); assert( !ExprHasProperty(pExpr, EP_TokenOnly) ); + assert( ExprUseXList(pExpr) ); pFarg = pExpr->x.pList; nFarg = pFarg ? 
pFarg->nExpr : 0; assert( !ExprHasProperty(pExpr, EP_IntValue) ); @@ -105481,7 +106879,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } #endif if( pDef==0 || pDef->xFinalize!=0 ){ - sqlite3ErrorMsg(pParse, "unknown function: %s()", zId); + sqlite3ErrorMsg(pParse, "unknown function: %#T()", pExpr); break; } if( pDef->funcFlags & SQLITE_FUNC_INLINE ){ @@ -105558,7 +106956,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); } #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC - if( pDef->funcFlags & SQLITE_FUNC_OFFSET ){ + if( (pDef->funcFlags & SQLITE_FUNC_OFFSET)!=0 && ALWAYS(pFarg!=0) ){ Expr *pArg = pFarg->a[0].pExpr; if( pArg->op==TK_COLUMN ){ sqlite3VdbeAddOp3(v, OP_Offset, pArg->iTable, pArg->iColumn, target); @@ -105588,7 +106986,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) testcase( op==TK_SELECT ); if( pParse->db->mallocFailed ){ return 0; - }else if( op==TK_SELECT && (nCol = pExpr->x.pSelect->pEList->nExpr)!=1 ){ + }else if( op==TK_SELECT + && ALWAYS( ExprUseXSelect(pExpr) ) + && (nCol = pExpr->x.pSelect->pEList->nExpr)!=1 + ){ sqlite3SubselectError(pParse, nCol, 1); }else{ return sqlite3CodeSubselect(pParse, pExpr); @@ -105600,11 +107001,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( pExpr->pLeft->iTable==0 ){ pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft); } - assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT - || pExpr->pLeft->op==TK_ERROR ); - if( pExpr->iTable!=0 - && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft)) - ){ + assert( pExpr->pLeft->op==TK_SELECT || pExpr->pLeft->op==TK_ERROR ); + n = sqlite3ExprVectorSize(pExpr->pLeft); + if( pExpr->iTable!=n ){ sqlite3ErrorMsg(pParse, "%d columns assigned %d values", pExpr->iTable, n); } @@ -105672,9 +107071,14 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** p1==1 -> old.a p1==4 -> new.a ** p1==2 -> old.b p1==5 -> new.b */ - Table *pTab = pExpr->y.pTab; - int iCol = pExpr->iColumn; - int p1 = pExpr->iTable * (pTab->nCol+1) + 1 + Table *pTab; + int iCol; + int p1; + + assert( ExprUseYTab(pExpr) ); + pTab = pExpr->y.pTab; + iCol = pExpr->iColumn; + p1 = pExpr->iTable * (pTab->nCol+1) + 1 + sqlite3TableColumnToStorage(pTab, iCol); assert( pExpr->iTable==0 || pExpr->iTable==1 ); @@ -105685,7 +107089,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeAddOp2(v, OP_Param, p1, target); VdbeComment((v, "r[%d]=%s.%s", target, (pExpr->iTable ? "new" : "old"), - (pExpr->iColumn<0 ? "rowid" : pExpr->y.pTab->aCol[iCol].zName) + (pExpr->iColumn<0 ? 
"rowid" : pExpr->y.pTab->aCol[iCol].zCnName) )); #ifndef SQLITE_OMIT_FLOATING_POINT @@ -105762,7 +107166,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) Expr *pDel = 0; sqlite3 *db = pParse->db; - assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList ); + assert( ExprUseXList(pExpr) && pExpr->x.pList!=0 ); assert(pExpr->x.pList->nExpr > 0); pEList = pExpr->x.pList; aListelem = pEList->a; @@ -105959,7 +107363,7 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); if( inReg!=target ){ u8 op; - if( ExprHasProperty(pExpr,EP_Subquery) ){ + if( ALWAYS(pExpr) && ExprHasProperty(pExpr,EP_Subquery) ){ op = OP_Copy; }else{ op = OP_SCopy; @@ -106107,7 +107511,7 @@ static void exprCodeBetween( memset(&compRight, 0, sizeof(Expr)); memset(&exprAnd, 0, sizeof(Expr)); - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( ExprUseXList(pExpr) ); pDel = sqlite3ExprDup(db, pExpr->pLeft, 0); if( db->mallocFailed==0 ){ exprAnd.op = TK_AND; @@ -106497,7 +107901,11 @@ SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,i ** Otherwise, if the values are not the same or if pExpr is not a simple ** SQL value, zero is returned. */ -static int exprCompareVariable(Parse *pParse, Expr *pVar, Expr *pExpr){ +static int exprCompareVariable( + const Parse *pParse, + const Expr *pVar, + const Expr *pExpr +){ int res = 0; int iVar; sqlite3_value *pL, *pR = 0; @@ -106549,7 +107957,12 @@ static int exprCompareVariable(Parse *pParse, Expr *pVar, Expr *pExpr){ ** Argument pParse should normally be NULL. If it is not NULL and pA or ** pB causes a return value of 2. */ -SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTab){ +SQLITE_PRIVATE int sqlite3ExprCompare( + const Parse *pParse, + const Expr *pA, + const Expr *pB, + int iTab +){ u32 combinedFlags; if( pA==0 || pB==0 ){ return pB==pA ? 0 : 2; @@ -106573,7 +107986,9 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTa } return 2; } - if( pA->op!=TK_COLUMN && pA->op!=TK_AGG_COLUMN && pA->u.zToken ){ + assert( !ExprHasProperty(pA, EP_IntValue) ); + assert( !ExprHasProperty(pB, EP_IntValue) ); + if( pA->u.zToken ){ if( pA->op==TK_FUNCTION || pA->op==TK_AGG_FUNCTION ){ if( sqlite3StrICmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2; #ifndef SQLITE_OMIT_WINDOWFUNC @@ -106591,7 +108006,12 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTa return 0; }else if( pA->op==TK_COLLATE ){ if( sqlite3_stricmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2; - }else if( ALWAYS(pB->u.zToken!=0) && strcmp(pA->u.zToken,pB->u.zToken)!=0 ){ + }else + if( pB->u.zToken!=0 + && pA->op!=TK_COLUMN + && pA->op!=TK_AGG_COLUMN + && strcmp(pA->u.zToken,pB->u.zToken)!=0 + ){ return 2; } } @@ -106633,7 +108053,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTa ** Two NULL pointers are considered to be the same. But a NULL pointer ** always differs from a non-NULL pointer. */ -SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){ +SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB, int iTab){ int i; if( pA==0 && pB==0 ) return 0; if( pA==0 || pB==0 ) return 1; @@ -106652,7 +108072,7 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){ ** Like sqlite3ExprCompare() except COLLATE operators at the top-level ** are ignored. 
*/ -SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA, Expr *pB, int iTab){ +SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){ return sqlite3ExprCompare(0, sqlite3ExprSkipCollateAndLikely(pA), sqlite3ExprSkipCollateAndLikely(pB), @@ -106666,9 +108086,9 @@ SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA, Expr *pB, int iTab){ ** non-NULL if pNN is not NULL */ static int exprImpliesNotNull( - Parse *pParse, /* Parsing context */ - Expr *p, /* The expression to be checked */ - Expr *pNN, /* The expression that is NOT NULL */ + const Parse *pParse,/* Parsing context */ + const Expr *p, /* The expression to be checked */ + const Expr *pNN, /* The expression that is NOT NULL */ int iTab, /* Table being evaluated */ int seenNot /* Return true only if p can be any non-NULL value */ ){ @@ -106680,12 +108100,13 @@ static int exprImpliesNotNull( switch( p->op ){ case TK_IN: { if( seenNot && ExprHasProperty(p, EP_xIsSelect) ) return 0; - assert( ExprHasProperty(p,EP_xIsSelect) - || (p->x.pList!=0 && p->x.pList->nExpr>0) ); + assert( ExprUseXSelect(p) || (p->x.pList!=0 && p->x.pList->nExpr>0) ); return exprImpliesNotNull(pParse, p->pLeft, pNN, iTab, 1); } case TK_BETWEEN: { - ExprList *pList = p->x.pList; + ExprList *pList; + assert( ExprUseXList(p) ); + pList = p->x.pList; assert( pList!=0 ); assert( pList->nExpr==2 ); if( seenNot ) return 0; @@ -106761,7 +108182,12 @@ static int exprImpliesNotNull( ** improvement. Returning false might cause a performance reduction, but ** it will always give the correct answer and is hence always safe. */ -SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse *pParse, Expr *pE1, Expr *pE2, int iTab){ +SQLITE_PRIVATE int sqlite3ExprImpliesExpr( + const Parse *pParse, + const Expr *pE1, + const Expr *pE2, + int iTab +){ if( sqlite3ExprCompare(pParse, pE1, pE2, iTab)==0 ){ return 1; } @@ -106857,10 +108283,14 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ testcase( pExpr->op==TK_GE ); /* The y.pTab=0 assignment in wherecode.c always happens after the ** impliesNotNullRow() test */ - if( (pLeft->op==TK_COLUMN && ALWAYS(pLeft->y.pTab!=0) - && IsVirtual(pLeft->y.pTab)) - || (pRight->op==TK_COLUMN && ALWAYS(pRight->y.pTab!=0) - && IsVirtual(pRight->y.pTab)) + assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); + assert( pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); + if( (pLeft->op==TK_COLUMN + && pLeft->y.pTab!=0 + && IsVirtual(pLeft->y.pTab)) + || (pRight->op==TK_COLUMN + && pRight->y.pTab!=0 + && IsVirtual(pRight->y.pTab)) ){ return WRC_Prune; } @@ -106969,88 +108399,125 @@ SQLITE_PRIVATE int sqlite3ExprCoveredByIndex( } -/* -** An instance of the following structure is used by the tree walker -** to count references to table columns in the arguments of an -** aggregate function, in order to implement the -** sqlite3FunctionThisSrc() routine. -*/ -struct SrcCount { - SrcList *pSrc; /* One particular FROM clause in a nested query */ - int iSrcInner; /* Smallest cursor number in this context */ - int nThis; /* Number of references to columns in pSrcList */ - int nOther; /* Number of references to columns in other FROM clauses */ +/* Structure used to pass information throught the Walker in order to +** implement sqlite3ReferencesSrcList(). 
+*/ +struct RefSrcList { + sqlite3 *db; /* Database connection used for sqlite3DbRealloc() */ + SrcList *pRef; /* Looking for references to these tables */ + i64 nExclude; /* Number of tables to exclude from the search */ + int *aiExclude; /* Cursor IDs for tables to exclude from the search */ }; /* -** xSelect callback for sqlite3FunctionUsesThisSrc(). If this is the first -** SELECT with a FROM clause encountered during this iteration, set -** SrcCount.iSrcInner to the cursor number of the leftmost object in -** the FROM cause. +** Walker SELECT callbacks for sqlite3ReferencesSrcList(). +** +** When entering a new subquery on the pExpr argument, add all FROM clause +** entries for that subquery to the exclude list. +** +** When leaving the subquery, remove those entries from the exclude list. */ -static int selectSrcCount(Walker *pWalker, Select *pSel){ - struct SrcCount *p = pWalker->u.pSrcCount; - if( p->iSrcInner==0x7FFFFFFF && ALWAYS(pSel->pSrc) && pSel->pSrc->nSrc ){ - pWalker->u.pSrcCount->iSrcInner = pSel->pSrc->a[0].iCursor; +static int selectRefEnter(Walker *pWalker, Select *pSelect){ + struct RefSrcList *p = pWalker->u.pRefSrcList; + SrcList *pSrc = pSelect->pSrc; + i64 i, j; + int *piNew; + if( pSrc->nSrc==0 ) return WRC_Continue; + j = p->nExclude; + p->nExclude += pSrc->nSrc; + piNew = sqlite3DbRealloc(p->db, p->aiExclude, p->nExclude*sizeof(int)); + if( piNew==0 ){ + p->nExclude = 0; + return WRC_Abort; + }else{ + p->aiExclude = piNew; + } + for(i=0; inSrc; i++, j++){ + p->aiExclude[j] = pSrc->a[i].iCursor; } return WRC_Continue; } +static void selectRefLeave(Walker *pWalker, Select *pSelect){ + struct RefSrcList *p = pWalker->u.pRefSrcList; + SrcList *pSrc = pSelect->pSrc; + if( p->nExclude ){ + assert( p->nExclude>=pSrc->nSrc ); + p->nExclude -= pSrc->nSrc; + } +} -/* -** Count the number of references to columns. +/* This is the Walker EXPR callback for sqlite3ReferencesSrcList(). +** +** Set the 0x01 bit of pWalker->eCode if there is a reference to any +** of the tables shown in RefSrcList.pRef. +** +** Set the 0x02 bit of pWalker->eCode if there is a reference to a +** table is in neither RefSrcList.pRef nor RefSrcList.aiExclude. */ -static int exprSrcCount(Walker *pWalker, Expr *pExpr){ - /* There was once a NEVER() on the second term on the grounds that - ** sqlite3FunctionUsesThisSrc() was always called before - ** sqlite3ExprAnalyzeAggregates() and so the TK_COLUMNs have not yet - ** been converted into TK_AGG_COLUMN. But this is no longer true due - ** to window functions - sqlite3WindowRewrite() may now indirectly call - ** FunctionUsesThisSrc() when creating a new sub-select. */ - if( pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN ){ +static int exprRefToSrcList(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_COLUMN + || pExpr->op==TK_AGG_COLUMN + ){ int i; - struct SrcCount *p = pWalker->u.pSrcCount; - SrcList *pSrc = p->pSrc; + struct RefSrcList *p = pWalker->u.pRefSrcList; + SrcList *pSrc = p->pRef; int nSrc = pSrc ? pSrc->nSrc : 0; for(i=0; iiTable==pSrc->a[i].iCursor ) break; + if( pExpr->iTable==pSrc->a[i].iCursor ){ + pWalker->eCode |= 1; + return WRC_Continue; + } } - if( inThis++; - }else if( pExpr->iTableiSrcInner ){ - /* In a well-formed parse tree (no name resolution errors), - ** TK_COLUMN nodes with smaller Expr.iTable values are in an - ** outer context. 
Those are the only ones to count as "other" */ - p->nOther++; + for(i=0; inExclude && p->aiExclude[i]!=pExpr->iTable; i++){} + if( i>=p->nExclude ){ + pWalker->eCode |= 2; } } return WRC_Continue; } /* -** Determine if any of the arguments to the pExpr Function reference -** pSrcList. Return true if they do. Also return true if the function -** has no arguments or has only constant arguments. Return false if pExpr -** references columns but not columns of tables found in pSrcList. +** Check to see if pExpr references any tables in pSrcList. +** Possible return values: +** +** 1 pExpr does references a table in pSrcList. +** +** 0 pExpr references some table that is not defined in either +** pSrcList or in subqueries of pExpr itself. +** +** -1 pExpr only references no tables at all, or it only +** references tables defined in subqueries of pExpr itself. +** +** As currently used, pExpr is always an aggregate function call. That +** fact is exploited for efficiency. */ -SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr *pExpr, SrcList *pSrcList){ +SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList *pSrcList){ Walker w; - struct SrcCount cnt; - assert( pExpr->op==TK_AGG_FUNCTION ); + struct RefSrcList x; memset(&w, 0, sizeof(w)); - w.xExprCallback = exprSrcCount; - w.xSelectCallback = selectSrcCount; - w.u.pSrcCount = &cnt; - cnt.pSrc = pSrcList; - cnt.iSrcInner = (pSrcList&&pSrcList->nSrc)?pSrcList->a[0].iCursor:0x7FFFFFFF; - cnt.nThis = 0; - cnt.nOther = 0; + memset(&x, 0, sizeof(x)); + w.xExprCallback = exprRefToSrcList; + w.xSelectCallback = selectRefEnter; + w.xSelectCallback2 = selectRefLeave; + w.u.pRefSrcList = &x; + x.db = pParse->db; + x.pRef = pSrcList; + assert( pExpr->op==TK_AGG_FUNCTION ); + assert( ExprUseXList(pExpr) ); sqlite3WalkExprList(&w, pExpr->x.pList); #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(pExpr, EP_WinFunc) ){ sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter); } #endif - return cnt.nThis>0 || cnt.nOther==0; + sqlite3DbFree(pParse->db, x.aiExclude); + if( w.eCode & 0x01 ){ + return 1; + }else if( w.eCode ){ + return 0; + }else{ + return -1; + } } /* @@ -107185,6 +108652,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 ){ pCol = &pAggInfo->aCol[k]; + assert( ExprUseYTab(pExpr) ); pCol->pTab = pExpr->y.pTab; pCol->iTable = pExpr->iTable; pCol->iColumn = pExpr->iColumn; @@ -107248,7 +108716,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ pItem = &pAggInfo->aFunc[i]; pItem->pFExpr = pExpr; pItem->iMem = ++pParse->nMem; - assert( !ExprHasProperty(pExpr, EP_IntValue) ); + assert( ExprUseUToken(pExpr) ); pItem->pFunc = sqlite3FindFunction(pParse->db, pExpr->u.zToken, pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0); @@ -107463,7 +108931,7 @@ static void renameTestSchema( pParse->colNamesSet = 1; sqlite3NestedParse(pParse, "SELECT 1 " - "FROM \"%w\"." DFLT_SCHEMA_TABLE " " + "FROM \"%w\"." LEGACY_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" " AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL ", @@ -107474,7 +108942,7 @@ static void renameTestSchema( if( bTemp==0 ){ sqlite3NestedParse(pParse, "SELECT 1 " - "FROM temp." DFLT_SCHEMA_TABLE " " + "FROM temp." 
LEGACY_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" " AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL ", @@ -107492,14 +108960,14 @@ static void renameTestSchema( */ static void renameFixQuotes(Parse *pParse, const char *zDb, int bTemp){ sqlite3NestedParse(pParse, - "UPDATE \"%w\"." DFLT_SCHEMA_TABLE + "UPDATE \"%w\"." LEGACY_SCHEMA_TABLE " SET sql = sqlite_rename_quotefix(%Q, sql)" "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" , zDb, zDb ); if( bTemp==0 ){ sqlite3NestedParse(pParse, - "UPDATE temp." DFLT_SCHEMA_TABLE + "UPDATE temp." LEGACY_SCHEMA_TABLE " SET sql = sqlite_rename_quotefix('temp', sql)" "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" @@ -107538,9 +109006,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( const char *zTabName; /* Original name of the table */ Vdbe *v; VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */ - u32 savedDbFlags; /* Saved value of db->mDbFlags */ - savedDbFlags = db->mDbFlags; if( NEVER(db->mallocFailed) ) goto exit_rename_table; assert( pSrc->nSrc==1 ); assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); @@ -107549,7 +109015,6 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( if( !pTab ) goto exit_rename_table; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); zDb = db->aDb[iDb].zDbSName; - db->mDbFlags |= DBFLAG_PreferBuiltin; /* Get a NULL terminated version of the new table name. */ zName = sqlite3NameFromToken(db, pName); @@ -107578,7 +109043,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( } #ifndef SQLITE_OMIT_VIEW - if( pTab->pSelect ){ + if( IsView(pTab) ){ sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName); goto exit_rename_table; } @@ -107620,7 +109085,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( /* Rewrite all CREATE TABLE, INDEX, TRIGGER or VIEW statements in ** the schema to use the new table name. */ sqlite3NestedParse(pParse, - "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "UPDATE \"%w\"." LEGACY_SCHEMA_TABLE " SET " "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) " "WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)" "AND name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" @@ -107630,7 +109095,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( /* Update the tbl_name and name columns of the sqlite_schema table ** as required. */ sqlite3NestedParse(pParse, - "UPDATE %Q." DFLT_SCHEMA_TABLE " SET " + "UPDATE %Q." 
LEGACY_SCHEMA_TABLE " SET " "tbl_name = %Q, " "name = CASE " "WHEN type='table' THEN %Q " @@ -107690,7 +109155,6 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( exit_rename_table: sqlite3SrcListDelete(db, pSrc); sqlite3DbFree(db, zName); - db->mDbFlags = savedDbFlags; } /* @@ -107731,7 +109195,9 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ int r1; /* Temporary registers */ db = pParse->db; - if( pParse->nErr || db->mallocFailed ) return; + assert( db->pParse==pParse ); + if( pParse->nErr ) return; + assert( db->mallocFailed==0 ); pNew = pParse->pNewTable; assert( pNew ); @@ -107740,7 +109206,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ zDb = db->aDb[iDb].zDbSName; zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */ pCol = &pNew->aCol[pNew->nCol-1]; - pDflt = pCol->pDflt; + pDflt = sqlite3ColumnExpr(pNew, pCol); pTab = sqlite3FindTable(db, zTab, zDb); assert( pTab ); @@ -107774,7 +109240,8 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ if( pDflt && pDflt->pLeft->op==TK_NULL ){ pDflt = 0; } - if( (db->flags&SQLITE_ForeignKeys) && pNew->pFKey && pDflt ){ + assert( IsOrdinaryTable(pNew) ); + if( (db->flags&SQLITE_ForeignKeys) && pNew->u.tab.pFKey && pDflt ){ sqlite3ErrorIfNotEmpty(pParse, zDb, zTab, "Cannot add a REFERENCES column with non-NULL default value"); } @@ -107811,31 +109278,30 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); if( zCol ){ char *zEnd = &zCol[pColDef->n-1]; - u32 savedDbFlags = db->mDbFlags; while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ *zEnd-- = '\0'; } - db->mDbFlags |= DBFLAG_PreferBuiltin; /* substr() operations on characters, but addColOffset is in bytes. So we ** have to use printf() to translate between these units: */ + assert( IsOrdinaryTable(pTab) ); + assert( IsOrdinaryTable(pNew) ); sqlite3NestedParse(pParse, - "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "UPDATE \"%w\"." LEGACY_SCHEMA_TABLE " SET " "sql = printf('%%.%ds, ',sql) || %Q" " || substr(sql,1+length(printf('%%.%ds',sql))) " "WHERE type = 'table' AND name = %Q", - zDb, pNew->addColOffset, zCol, pNew->addColOffset, + zDb, pNew->u.tab.addColOffset, zCol, pNew->u.tab.addColOffset, zTab ); sqlite3DbFree(db, zCol); - db->mDbFlags = savedDbFlags; } - /* Make sure the schema version is at least 3. But do not upgrade - ** from less than 3 to 4, as that will corrupt any preexisting DESC - ** index. - */ v = sqlite3GetVdbe(pParse); if( v ){ + /* Make sure the schema version is at least 3. But do not upgrade + ** from less than 3 to 4, as that will corrupt any preexisting DESC + ** index. 
+ */ r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT); sqlite3VdbeUsesBtree(v, iDb); @@ -107844,10 +109310,25 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3); sqlite3ReleaseTempReg(pParse, r1); - } - /* Reload the table definition */ - renameReloadSchema(pParse, iDb, INITFLAG_AlterRename); + /* Reload the table definition */ + renameReloadSchema(pParse, iDb, INITFLAG_AlterAdd); + + /* Verify that constraints are still satisfied */ + if( pNew->pCheck!=0 + || (pCol->notNull && (pCol->colFlags & COLFLAG_GENERATED)!=0) + ){ + sqlite3NestedParse(pParse, + "SELECT CASE WHEN quick_check GLOB 'CHECK*'" + " THEN raise(ABORT,'CHECK constraint failed')" + " ELSE raise(ABORT,'NOT NULL constraint failed')" + " END" + " FROM pragma_quick_check(%Q,%Q)" + " WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'", + zTab, zDb + ); + } + } } /* @@ -107888,7 +109369,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ #endif /* Make sure this is not an attempt to ALTER a view. */ - if( pTab->pSelect ){ + if( IsView(pTab) ){ sqlite3ErrorMsg(pParse, "Cannot add a column to a view"); goto exit_begin_add_column; } @@ -107897,7 +109378,8 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ } sqlite3MayAbort(pParse); - assert( pTab->addColOffset>0 ); + assert( IsOrdinaryTable(pTab) ); + assert( pTab->u.tab.addColOffset>0 ); iDb = sqlite3SchemaToIndex(db, pTab->pSchema); /* Put a copy of the Table struct in Parse.pNewTable for the @@ -107924,13 +109406,13 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); for(i=0; inCol; i++){ Column *pCol = &pNew->aCol[i]; - pCol->zName = sqlite3DbStrDup(db, pCol->zName); - pCol->hName = sqlite3StrIHash(pCol->zName); - pCol->zColl = 0; - pCol->pDflt = 0; + pCol->zCnName = sqlite3DbStrDup(db, pCol->zCnName); + pCol->hName = sqlite3StrIHash(pCol->zCnName); } + assert( IsOrdinaryTable(pNew) ); + pNew->u.tab.pDfltList = sqlite3ExprListDup(db, pTab->u.tab.pDfltList, 0); pNew->pSchema = db->aDb[iDb].pSchema; - pNew->addColOffset = pTab->addColOffset; + pNew->u.tab.addColOffset = pTab->u.tab.addColOffset; pNew->nTabRef = 1; exit_begin_add_column: @@ -107950,7 +109432,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ static int isRealTable(Parse *pParse, Table *pTab, int bDrop){ const char *zType = 0; #ifndef SQLITE_OMIT_VIEW - if( pTab->pSelect ){ + if( IsView(pTab) ){ zType = "view"; } #endif @@ -108017,10 +109499,10 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( zOld = sqlite3NameFromToken(db, pOld); if( !zOld ) goto exit_rename_column; for(iCol=0; iColnCol; iCol++){ - if( 0==sqlite3StrICmp(pTab->aCol[iCol].zName, zOld) ) break; + if( 0==sqlite3StrICmp(pTab->aCol[iCol].zCnName, zOld) ) break; } if( iCol==pTab->nCol ){ - sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zOld); + sqlite3ErrorMsg(pParse, "no such column: \"%T\"", pOld); goto exit_rename_column; } @@ -108038,18 +109520,17 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( assert( pNew->n>0 ); bQuote = sqlite3Isquote(pNew->z[0]); sqlite3NestedParse(pParse, - "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "UPDATE \"%w\"." 
LEGACY_SCHEMA_TABLE " SET " "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X' " - " AND (type != 'index' OR tbl_name = %Q)" - " AND sql NOT LIKE 'create virtual%%'", + " AND (type != 'index' OR tbl_name = %Q)", zDb, zDb, pTab->zName, iCol, zNew, bQuote, iSchema==1, pTab->zName ); sqlite3NestedParse(pParse, - "UPDATE temp." DFLT_SCHEMA_TABLE " SET " + "UPDATE temp." LEGACY_SCHEMA_TABLE " SET " "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) " "WHERE type IN ('trigger', 'view')", zDb, pTab->zName, iCol, zNew, bQuote @@ -108084,7 +109565,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( ** the parse tree. */ struct RenameToken { - void *p; /* Parse tree element created by token t */ + const void *p; /* Parse tree element created by token t */ Token t; /* The token that created parse tree element p */ RenameToken *pNext; /* Next is a list of all RenameToken objects */ }; @@ -108126,9 +109607,11 @@ struct RenameCtx { ** Technically, as x no longer points into a valid object or to the byte ** following a valid object, it may not be used in comparison operations. */ -static void renameTokenCheckAll(Parse *pParse, void *pPtr){ - if( pParse->nErr==0 && pParse->db->mallocFailed==0 ){ - RenameToken *p; +static void renameTokenCheckAll(Parse *pParse, const void *pPtr){ + assert( pParse==pParse->db->pParse ); + assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); + if( pParse->nErr==0 ){ + const RenameToken *p; u8 i = 0; for(p=pParse->pRename; p; p=p->pNext){ if( p->p ){ @@ -108154,7 +109637,11 @@ static void renameTokenCheckAll(Parse *pParse, void *pPtr){ ** with tail recursion in tokenExpr() routine, for a small performance ** improvement. */ -SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse *pParse, void *pPtr, Token *pToken){ +SQLITE_PRIVATE const void *sqlite3RenameTokenMap( + Parse *pParse, + const void *pPtr, + const Token *pToken +){ RenameToken *pNew; assert( pPtr || pParse->db->mallocFailed ); renameTokenCheckAll(pParse, pPtr); @@ -108176,7 +109663,7 @@ SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse *pParse, void *pPtr, Token *pTo ** with parse tree element pFrom. This function remaps the associated token ** to parse tree element pTo. 
*/ -SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse *pParse, void *pTo, void *pFrom){ +SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse *pParse, const void *pTo, const void *pFrom){ RenameToken *p; renameTokenCheckAll(pParse, pTo); for(p=pParse->pRename; p; p=p->pNext){ @@ -108192,7 +109679,10 @@ SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse *pParse, void *pTo, void *pFro */ static int renameUnmapExprCb(Walker *pWalker, Expr *pExpr){ Parse *pParse = pWalker->pParse; - sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr); + sqlite3RenameTokenRemap(pParse, 0, (const void*)pExpr); + if( ExprUseYTab(pExpr) ){ + sqlite3RenameTokenRemap(pParse, 0, (const void*)&pExpr->y.pTab); + } return WRC_Continue; } @@ -108222,6 +109712,7 @@ static void renameWalkWith(Walker *pWalker, Select *pSelect){ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; if( pCopy ) sqlite3SelectPrep(sNC.pParse, p, &sNC); + if( sNC.pParse->db->mallocFailed ) return; sqlite3WalkSelect(pWalker, p); sqlite3RenameExprlistUnmap(pParse, pWith->a[i].pCols); } @@ -108236,12 +109727,12 @@ static void renameWalkWith(Walker *pWalker, Select *pSelect){ */ static void unmapColumnIdlistNames( Parse *pParse, - IdList *pIdList + const IdList *pIdList ){ if( pIdList ){ int ii; for(ii=0; iinId; ii++){ - sqlite3RenameTokenRemap(pParse, 0, (void*)pIdList->a[ii].zName); + sqlite3RenameTokenRemap(pParse, 0, (const void*)pIdList->a[ii].zName); } } } @@ -108253,9 +109744,9 @@ static int renameUnmapSelectCb(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; int i; if( pParse->nErr ) return WRC_Abort; + testcase( p->selFlags & SF_View ); + testcase( p->selFlags & SF_CopyCte ); if( p->selFlags & (SF_View|SF_CopyCte) ){ - testcase( p->selFlags & SF_View ); - testcase( p->selFlags & SF_CopyCte ); return WRC_Prune; } if( ALWAYS(p->pEList) ){ @@ -108270,7 +109761,7 @@ static int renameUnmapSelectCb(Walker *pWalker, Select *p){ SrcList *pSrc = p->pSrc; for(i=0; inSrc; i++){ sqlite3RenameTokenRemap(pParse, 0, (void*)pSrc->a[i].zName); - if( sqlite3WalkExpr(pWalker, pSrc->a[i].pOn) ) return WRC_Abort; + sqlite3WalkExpr(pWalker, pSrc->a[i].pOn); unmapColumnIdlistNames(pParse, pSrc->a[i].pUsing); } } @@ -108338,7 +109829,7 @@ static void renameTokenFree(sqlite3 *db, RenameToken *pToken){ static RenameToken *renameTokenFind( Parse *pParse, struct RenameCtx *pCtx, - void *pPtr + const void *pPtr ){ RenameToken **pp; if( NEVER(pPtr==0) ){ @@ -108392,6 +109883,7 @@ static int renameColumnExprCb(Walker *pWalker, Expr *pExpr){ renameTokenFind(pWalker->pParse, p, (void*)pExpr); }else if( pExpr->op==TK_COLUMN && pExpr->iColumn==p->iCol + && ALWAYS(ExprUseYTab(pExpr)) && p->pTab==pExpr->y.pTab ){ renameTokenFind(pWalker->pParse, p, (void*)pExpr); @@ -108440,12 +109932,12 @@ static void renameColumnParseError( const char *zN = (const char*)sqlite3_value_text(pObject); char *zErr; - zErr = sqlite3_mprintf("error in %s %s%s%s: %s", + zErr = sqlite3MPrintf(pParse->db, "error in %s %s%s%s: %s", zT, zN, (zWhen[0] ? 
" " : ""), zWhen, pParse->zErrMsg ); sqlite3_result_error(pCtx, zErr, -1); - sqlite3_free(zErr); + sqlite3DbFree(pParse->db, zErr); } /* @@ -108457,18 +109949,18 @@ static void renameColumnParseError( static void renameColumnElistNames( Parse *pParse, RenameCtx *pCtx, - ExprList *pEList, + const ExprList *pEList, const char *zOld ){ if( pEList ){ int i; for(i=0; inExpr; i++){ - char *zName = pEList->a[i].zEName; + const char *zName = pEList->a[i].zEName; if( ALWAYS(pEList->a[i].eEName==ENAME_NAME) && ALWAYS(zName!=0) && 0==sqlite3_stricmp(zName, zOld) ){ - renameTokenFind(pParse, pCtx, (void*)zName); + renameTokenFind(pParse, pCtx, (const void*)zName); } } } @@ -108482,15 +109974,15 @@ static void renameColumnElistNames( static void renameColumnIdlistNames( Parse *pParse, RenameCtx *pCtx, - IdList *pIdList, + const IdList *pIdList, const char *zOld ){ if( pIdList ){ int i; for(i=0; inId; i++){ - char *zName = pIdList->a[i].zName; + const char *zName = pIdList->a[i].zName; if( 0==sqlite3_stricmp(zName, zOld) ){ - renameTokenFind(pParse, pCtx, (void*)zName); + renameTokenFind(pParse, pCtx, (const void*)zName); } } } @@ -108509,24 +110001,22 @@ static int renameParseSql( int bTemp /* True if SQL is from temp schema */ ){ int rc; - char *zErr = 0; + sqlite3ParseObjectInit(p, db); + if( zSql==0 ){ + return SQLITE_NOMEM; + } + if( sqlite3StrNICmp(zSql,"CREATE ",7)!=0 ){ + return SQLITE_CORRUPT_BKPT; + } db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb); - - /* Parse the SQL statement passed as the first argument. If no error - ** occurs and the parse does not result in a new table, index or - ** trigger object, the database must be corrupt. */ - memset(p, 0, sizeof(Parse)); p->eParseMode = PARSE_MODE_RENAME; p->db = db; p->nQueryLoop = 1; - rc = zSql ? sqlite3RunParser(p, zSql, &zErr) : SQLITE_NOMEM; - assert( p->zErrMsg==0 ); - assert( rc!=SQLITE_OK || zErr==0 ); - p->zErrMsg = zErr; + rc = sqlite3RunParser(p, zSql); if( db->mallocFailed ) rc = SQLITE_NOMEM; if( rc==SQLITE_OK - && p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0 + && NEVER(p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0) ){ rc = SQLITE_CORRUPT_BKPT; } @@ -108716,6 +110206,9 @@ static int renameResolveTrigger(Parse *pParse){ } } } + if( rc==SQLITE_OK && db->mallocFailed ){ + rc = SQLITE_NOMEM; + } sNC.pSrcList = pSrc; if( rc==SQLITE_OK && pStep->pWhere ){ rc = sqlite3ResolveExprNames(&sNC, pStep->pWhere); @@ -108801,13 +110294,13 @@ static void renameParseCleanup(Parse *pParse){ sqlite3DeleteTrigger(db, pParse->pNewTrigger); sqlite3DbFree(db, pParse->zErrMsg); renameTokenFree(db, pParse->pRename); - sqlite3ParserReset(pParse); + sqlite3ParseObjectReset(pParse); } /* ** SQL function: ** -** sqlite_rename_column(zSql, iCol, bQuote, zNew, zTable, zOld) +** sqlite_rename_column(SQL,TYPE,OBJ,DB,TABLE,COL,NEWNAME,QUOTE,TEMP) ** ** 0. zSql: SQL statement to rewrite ** 1. type: Type of object ("table", "view" etc.) @@ -108825,7 +110318,8 @@ static void renameParseCleanup(Parse *pParse){ ** ** This function is used internally by the ALTER TABLE RENAME COLUMN command. ** It is only accessible to SQL created using sqlite3NestedParse(). It is -** not reachable from ordinary SQL passed into sqlite3_prepare(). +** not reachable from ordinary SQL passed into sqlite3_prepare() unless the +** SQLITE_TESTCTRL_INTERNAL_FUNCTIONS test setting is enabled. 
*/ static void renameColumnFunc( sqlite3_context *context, @@ -108863,7 +110357,7 @@ static void renameColumnFunc( sqlite3BtreeLeaveAll(db); return; } - zOld = pTab->aCol[iCol].zName; + zOld = pTab->aCol[iCol].zCnName; memset(&sCtx, 0, sizeof(sCtx)); sCtx.iCol = ((iCol==pTab->iPKey) ? -1 : iCol); @@ -108882,8 +110376,8 @@ static void renameColumnFunc( sCtx.pTab = pTab; if( rc!=SQLITE_OK ) goto renameColumnFunc_done; if( sParse.pNewTable ){ - Select *pSelect = sParse.pNewTable->pSelect; - if( pSelect ){ + if( IsView(sParse.pNewTable) ){ + Select *pSelect = sParse.pNewTable->u.view.pSelect; pSelect->selFlags &= ~SF_View; sParse.rc = SQLITE_OK; sqlite3SelectPrep(&sParse, pSelect, 0); @@ -108892,16 +110386,15 @@ static void renameColumnFunc( sqlite3WalkSelect(&sWalker, pSelect); } if( rc!=SQLITE_OK ) goto renameColumnFunc_done; - }else{ + }else if( IsOrdinaryTable(sParse.pNewTable) ){ /* A regular table */ int bFKOnly = sqlite3_stricmp(zTable, sParse.pNewTable->zName); FKey *pFKey; - assert( sParse.pNewTable->pSelect==0 ); sCtx.pTab = sParse.pNewTable; if( bFKOnly==0 ){ if( iColnCol ){ renameTokenFind( - &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zName + &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zCnName ); } if( sCtx.iCol<0 ){ @@ -108916,12 +110409,15 @@ static void renameColumnFunc( } #ifndef SQLITE_OMIT_GENERATED_COLUMNS for(i=0; inCol; i++){ - sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); + Expr *pExpr = sqlite3ColumnExpr(sParse.pNewTable, + &sParse.pNewTable->aCol[i]); + sqlite3WalkExpr(&sWalker, pExpr); } #endif } - for(pFKey=sParse.pNewTable->pFKey; pFKey; pFKey=pFKey->pNextFrom){ + assert( IsOrdinaryTable(sParse.pNewTable) ); + for(pFKey=sParse.pNewTable->u.tab.pFKey; pFKey; pFKey=pFKey->pNextFrom){ for(i=0; inCol; i++){ if( bFKOnly==0 && pFKey->aCol[i].iFrom==iCol ){ renameTokenFind(&sParse, &sCtx, (void*)&pFKey->aCol[i]); @@ -108972,7 +110468,9 @@ static void renameColumnFunc( renameColumnFunc_done: if( rc!=SQLITE_OK ){ - if( sParse.zErrMsg ){ + if( rc==SQLITE_ERROR && sqlite3WritableSchema(db) ){ + sqlite3_result_value(context, argv[0]); + }else if( sParse.zErrMsg ){ renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); @@ -108992,7 +110490,10 @@ static void renameColumnFunc( */ static int renameTableExprCb(Walker *pWalker, Expr *pExpr){ RenameCtx *p = pWalker->u.pRename; - if( pExpr->op==TK_COLUMN && p->pTab==pExpr->y.pTab ){ + if( pExpr->op==TK_COLUMN + && ALWAYS(ExprUseYTab(pExpr)) + && p->pTab==pExpr->y.pTab + ){ renameTokenFind(pWalker->pParse, p, (void*)&pExpr->y.pTab); } return WRC_Continue; @@ -109087,28 +110588,31 @@ static void renameTableFunc( if( sParse.pNewTable ){ Table *pTab = sParse.pNewTable; - if( pTab->pSelect ){ + if( IsView(pTab) ){ if( isLegacy==0 ){ - Select *pSelect = pTab->pSelect; + Select *pSelect = pTab->u.view.pSelect; NameContext sNC; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = &sParse; assert( pSelect->selFlags & SF_View ); pSelect->selFlags &= ~SF_View; - sqlite3SelectPrep(&sParse, pTab->pSelect, &sNC); + sqlite3SelectPrep(&sParse, pTab->u.view.pSelect, &sNC); if( sParse.nErr ){ rc = sParse.rc; }else{ - sqlite3WalkSelect(&sWalker, pTab->pSelect); + sqlite3WalkSelect(&sWalker, pTab->u.view.pSelect); } } }else{ /* Modify any FK definitions to point to the new table. 
*/ #ifndef SQLITE_OMIT_FOREIGN_KEY - if( isLegacy==0 || (db->flags & SQLITE_ForeignKeys) ){ + if( (isLegacy==0 || (db->flags & SQLITE_ForeignKeys)) + && !IsVirtual(pTab) + ){ FKey *pFKey; - for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ + assert( IsOrdinaryTable(pTab) ); + for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pFKey->pNextFrom){ if( sqlite3_stricmp(pFKey->zTo, zOld)==0 ){ renameTokenFind(&sParse, &sCtx, (void*)pFKey->zTo); } @@ -109165,7 +110669,9 @@ static void renameTableFunc( rc = renameEditSql(context, &sCtx, zInput, zNew, bQuote); } if( rc!=SQLITE_OK ){ - if( sParse.zErrMsg ){ + if( rc==SQLITE_ERROR && sqlite3WritableSchema(db) ){ + sqlite3_result_value(context, argv[3]); + }else if( sParse.zErrMsg ){ renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); @@ -109185,15 +110691,15 @@ static void renameTableFunc( static int renameQuotefixExprCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_STRING && (pExpr->flags & EP_DblQuoted) ){ - renameTokenFind(pWalker->pParse, pWalker->u.pRename, (void*)pExpr); + renameTokenFind(pWalker->pParse, pWalker->u.pRename, (const void*)pExpr); } return WRC_Continue; } -/* -** The implementation of an SQL scalar function that rewrites DDL statements -** so that any string literals that use double-quotes are modified so that -** they use single quotes. +/* SQL function: sqlite_rename_quotefix(DB,SQL) +** +** Rewrite the DDL statement "SQL" so that any string literals that use +** double-quotes use single quotes instead. ** ** Two arguments must be passed: ** @@ -109212,6 +110718,10 @@ static int renameQuotefixExprCb(Walker *pWalker, Expr *pExpr){ ** returns the string: ** ** CREATE VIEW v1 AS SELECT "a", 'string' FROM t1 +** +** If there is a error in the input SQL, then raise an error, except +** if PRAGMA writable_schema=ON, then just return the input string +** unmodified following an error. */ static void renameQuotefixFunc( sqlite3_context *context, @@ -109248,8 +110758,8 @@ static void renameQuotefixFunc( sWalker.u.pRename = &sCtx; if( sParse.pNewTable ){ - Select *pSelect = sParse.pNewTable->pSelect; - if( pSelect ){ + if( IsView(sParse.pNewTable) ){ + Select *pSelect = sParse.pNewTable->u.view.pSelect; pSelect->selFlags &= ~SF_View; sParse.rc = SQLITE_OK; sqlite3SelectPrep(&sParse, pSelect, 0); @@ -109262,7 +110772,9 @@ static void renameQuotefixFunc( sqlite3WalkExprList(&sWalker, sParse.pNewTable->pCheck); #ifndef SQLITE_OMIT_GENERATED_COLUMNS for(i=0; inCol; i++){ - sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); + sqlite3WalkExpr(&sWalker, + sqlite3ColumnExpr(sParse.pNewTable, + &sParse.pNewTable->aCol[i])); } #endif /* SQLITE_OMIT_GENERATED_COLUMNS */ } @@ -109284,7 +110796,11 @@ static void renameQuotefixFunc( renameTokenFree(db, sCtx.pList); } if( rc!=SQLITE_OK ){ - sqlite3_result_error_code(context, rc); + if( sqlite3WritableSchema(db) && rc==SQLITE_ERROR ){ + sqlite3_result_value(context, argv[1]); + }else{ + sqlite3_result_error_code(context, rc); + } } renameParseCleanup(&sParse); } @@ -109296,7 +110812,8 @@ static void renameQuotefixFunc( sqlite3BtreeLeaveAll(db); } -/* +/* Function: sqlite_rename_test(DB,SQL,TYPE,NAME,ISTEMP,WHEN,DQS) +** ** An SQL user function that checks that there are no parse or symbol ** resolution problems in a CREATE TRIGGER|TABLE|VIEW|INDEX statement. ** After an ALTER TABLE .. RENAME operation is performed and the schema @@ -109311,11 +110828,13 @@ static void renameQuotefixFunc( ** 5: "when" part of error message. 
** 6: True to disable the DQS quirk when parsing SQL. ** -** Unless it finds an error, this function normally returns NULL. However, it -** returns integer value 1 if: +** The return value is computed as follows: ** -** * the SQL argument creates a trigger, and -** * the table that the trigger is attached to is in database zDb. +** A. If an error is seen and not in PRAGMA writable_schema=ON mode, +** then raise the error. +** B. Else if a trigger is created and the the table that the trigger is +** attached to is in database zDb, then return 1. +** C. Otherwise return NULL. */ static void renameTableTest( sqlite3_context *context, @@ -109345,11 +110864,11 @@ static void renameTableTest( rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); db->flags |= (flags & (SQLITE_DqsDML|SQLITE_DqsDDL)); if( rc==SQLITE_OK ){ - if( isLegacy==0 && sParse.pNewTable && sParse.pNewTable->pSelect ){ + if( isLegacy==0 && sParse.pNewTable && IsView(sParse.pNewTable) ){ NameContext sNC; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = &sParse; - sqlite3SelectPrep(&sParse, sParse.pNewTable->pSelect, &sNC); + sqlite3SelectPrep(&sParse, sParse.pNewTable->u.view.pSelect, &sNC); if( sParse.nErr ) rc = sParse.rc; } @@ -109360,12 +110879,16 @@ static void renameTableTest( if( rc==SQLITE_OK ){ int i1 = sqlite3SchemaToIndex(db, sParse.pNewTrigger->pTabSchema); int i2 = sqlite3FindDbName(db, zDb); - if( i1==i2 ) sqlite3_result_int(context, 1); + if( i1==i2 ){ + /* Handle output case B */ + sqlite3_result_int(context, 1); + } } } } - if( rc!=SQLITE_OK && zWhen ){ + if( rc!=SQLITE_OK && zWhen && !sqlite3WritableSchema(db) ){ + /* Output case A */ renameColumnParseError(context, zWhen, argv[2], argv[3],&sParse); } renameParseCleanup(&sParse); @@ -109420,13 +110943,14 @@ static void dropColumnFunc( goto drop_column_done; } - pCol = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol].zName); + pCol = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol].zCnName); if( iColnCol-1 ){ RenameToken *pEnd; - pEnd = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol+1].zName); + pEnd = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol+1].zCnName); zEnd = (const char*)pEnd->t.z; }else{ - zEnd = (const char*)&zSql[pTab->addColOffset]; + assert( IsOrdinaryTable(pTab) ); + zEnd = (const char*)&zSql[pTab->u.tab.addColOffset]; while( ALWAYS(pCol->t.z[0]!=0) && pCol->t.z[0]!=',' ) pCol->t.z--; } @@ -109452,7 +110976,7 @@ static void dropColumnFunc( ** statement. Argument pSrc contains the possibly qualified name of the ** table being edited, and token pName the name of the column to drop. */ -SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token *pName){ +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, const Token *pName){ sqlite3 *db = pParse->db; /* Database handle */ Table *pTab; /* Table to modify */ int iDb; /* Index of db containing pTab in aDb[] */ @@ -109480,7 +111004,7 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token * } iCol = sqlite3ColumnIndex(pTab, zCol); if( iCol<0 ){ - sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zCol); + sqlite3ErrorMsg(pParse, "no such column: \"%T\"", pName); goto exit_drop_column; } @@ -109504,10 +111028,16 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token * iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 ); zDb = db->aDb[iDb].zDbSName; +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Invoke the authorization callback. 
*/ + if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, zCol) ){ + goto exit_drop_column; + } +#endif renameTestSchema(pParse, zDb, iDb==1, "", 0); renameFixQuotes(pParse, zDb, iDb==1); sqlite3NestedParse(pParse, - "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "UPDATE \"%w\"." LEGACY_SCHEMA_TABLE " SET " "sql = sqlite_drop_column(%d, sql, %d) " "WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)" , zDb, iDb, iCol, pTab->zName @@ -109562,6 +111092,12 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token * nField++; } } + if( nField==0 ){ + /* dbsqlfuzz 5f09e7bcc78b4954d06bf9f2400d7715f48d1fef */ + pParse->nMem++; + sqlite3VdbeAddOp2(v, OP_Null, 0, reg+1); + nField = 1; + } sqlite3VdbeAddOp3(v, OP_MakeRecord, reg+1, nField, regRec); if( pPk ){ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iCur, regRec, reg+1, pPk->nKeyCol); @@ -110031,7 +111567,6 @@ static void statInit( + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample); } #endif - db = sqlite3_context_db_handle(context); p = sqlite3DbMallocZero(db, n); if( p==0 ){ sqlite3_result_error_nomem(context); @@ -110450,28 +111985,19 @@ static void statGet( ** ** I = (K+D-1)/D */ - char *z; - int i; - - char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 ); - if( zRet==0 ){ - sqlite3_result_error_nomem(context); - return; - } + sqlite3_str sStat; /* Text of the constructed "stat" line */ + int i; /* Loop counter */ - sqlite3_snprintf(24, zRet, "%llu", + sqlite3StrAccumInit(&sStat, 0, 0, 0, (p->nKeyCol+1)*100); + sqlite3_str_appendf(&sStat, "%llu", p->nSkipAhead ? (u64)p->nEst : (u64)p->nRow); - z = zRet + sqlite3Strlen30(zRet); for(i=0; inKeyCol; i++){ u64 nDistinct = p->current.anDLt[i] + 1; u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; - sqlite3_snprintf(24, z, " %llu", iVal); - z += sqlite3Strlen30(z); + sqlite3_str_appendf(&sStat, " %llu", iVal); assert( p->current.anEq[i] ); } - assert( z[0]=='\0' && z>zRet ); - - sqlite3_result_text(context, zRet, -1, sqlite3_free); + sqlite3ResultStrAccum(context, &sStat); } #ifdef SQLITE_ENABLE_STAT4 else if( eCall==STAT_GET_ROWID ){ @@ -110490,6 +112016,8 @@ static void statGet( } }else{ tRowcnt *aCnt = 0; + sqlite3_str sStat; + int i; assert( p->iGetnSample ); switch( eCall ){ @@ -110501,23 +112029,12 @@ static void statGet( break; } } - - { - char *zRet = sqlite3MallocZero(p->nCol * 25); - if( zRet==0 ){ - sqlite3_result_error_nomem(context); - }else{ - int i; - char *z = zRet; - for(i=0; inCol; i++){ - sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]); - z += sqlite3Strlen30(z); - } - assert( z[0]=='\0' && z>zRet ); - z[-1] = '\0'; - sqlite3_result_text(context, zRet, -1, sqlite3_free); - } + sqlite3StrAccumInit(&sStat, 0, 0, 0, p->nCol*100); + for(i=0; inCol; i++){ + sqlite3_str_appendf(&sStat, "%llu ", (u64)aCnt[i]); } + if( sStat.nChar ) sStat.nChar--; + sqlite3ResultStrAccum(context, &sStat); } #endif /* SQLITE_ENABLE_STAT4 */ #ifndef SQLITE_DEBUG @@ -110566,7 +112083,7 @@ static void analyzeVdbeCommentIndexWithColumnName( }else if( i==XN_EXPR ){ VdbeComment((v,"%s.expr(%d)",pIdx->zName, k)); }else{ - VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zName)); + VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zCnName)); } } #else @@ -110613,7 +112130,7 @@ static void analyzeOneTable( if( v==0 || NEVER(pTab==0) ){ return; } - if( pTab->tnum==0 ){ + if( !IsOrdinaryTable(pTab) ){ /* Do not gather statistics on views or virtual tables */ return; } @@ -111438,9 +112955,12 @@ static int loadStatTbl( */ static int loadStat4(sqlite3 *db, const char *zDb){ int 
rc = SQLITE_OK; /* Result codes from subroutines */ + const Table *pStat4; assert( db->lookaside.bDisable ); - if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){ + if( (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 + && IsOrdinaryTable(pStat4) + ){ rc = loadStatTbl(db, "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", @@ -111477,6 +112997,7 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ char *zSql; int rc = SQLITE_OK; Schema *pSchema = db->aDb[iDb].pSchema; + const Table *pStat1; assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 ); @@ -111499,7 +113020,9 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ /* Load new statistics out of the sqlite_stat1 table */ sInfo.db = db; sInfo.zDatabase = db->aDb[iDb].zDbSName; - if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)!=0 ){ + if( (pStat1 = sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)) + && IsOrdinaryTable(pStat1) + ){ zSql = sqlite3MPrintf(db, "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); if( zSql==0 ){ @@ -111890,17 +113413,18 @@ static void codeAttach( sName.pParse = pParse; if( - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) || - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) || - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey)) + SQLITE_OK!=resolveAttachExpr(&sName, pFilename) || + SQLITE_OK!=resolveAttachExpr(&sName, pDbname) || + SQLITE_OK!=resolveAttachExpr(&sName, pKey) ){ goto attach_end; } #ifndef SQLITE_OMIT_AUTHORIZATION - if( pAuthArg ){ + if( ALWAYS(pAuthArg) ){ char *zAuthArg; if( pAuthArg->op==TK_STRING ){ + assert( !ExprHasProperty(pAuthArg, EP_IntValue) ); zAuthArg = pAuthArg->u.zToken; }else{ zAuthArg = 0; @@ -112319,10 +113843,10 @@ SQLITE_PRIVATE void sqlite3AuthRead( if( iCol>=0 ){ assert( iColnCol ); - zCol = pTab->aCol[iCol].zName; + zCol = pTab->aCol[iCol].zCnName; }else if( pTab->iPKey>=0 ){ assert( pTab->iPKeynCol ); - zCol = pTab->aCol[pTab->iPKey].zName; + zCol = pTab->aCol[pTab->iPKey].zCnName; }else{ zCol = "ROWID"; } @@ -112558,11 +114082,13 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ assert( pParse->pToplevel==0 ); db = pParse->db; + assert( db->pParse==pParse ); if( pParse->nested ) return; - if( db->mallocFailed || pParse->nErr ){ - if( pParse->rc==SQLITE_OK ) pParse->rc = SQLITE_ERROR; + if( pParse->nErr ){ + if( db->mallocFailed ) pParse->rc = SQLITE_NOMEM; return; } + assert( db->mallocFailed==0 ); /* Begin by generating some termination code at the end of the ** vdbe program @@ -112585,17 +114111,22 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ int i; int reg; - addrRewind = - sqlite3VdbeAddOp1(v, OP_Rewind, pReturning->iRetCur); - VdbeCoverage(v); - reg = pReturning->iRetReg; - for(i=0; inRetCol; i++){ - sqlite3VdbeAddOp3(v, OP_Column, pReturning->iRetCur, i, reg+i); + if( NEVER(pReturning->nRetCol==0) ){ + assert( CORRUPT_DB ); + }else{ + sqlite3VdbeAddOp0(v, OP_FkCheck); + addrRewind = + sqlite3VdbeAddOp1(v, OP_Rewind, pReturning->iRetCur); + VdbeCoverage(v); + reg = pReturning->iRetReg; + for(i=0; inRetCol; i++){ + sqlite3VdbeAddOp3(v, OP_Column, pReturning->iRetCur, i, reg+i); + } + sqlite3VdbeAddOp2(v, OP_ResultRow, reg, i); + sqlite3VdbeAddOp2(v, OP_Next, pReturning->iRetCur, addrRewind+1); + VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrRewind); } - sqlite3VdbeAddOp2(v, OP_ResultRow, reg, i); - sqlite3VdbeAddOp2(v, OP_Next, pReturning->iRetCur, addrRewind+1); - VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addrRewind); 
} sqlite3VdbeAddOp0(v, OP_Halt); @@ -112676,7 +114207,11 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ if( pParse->bReturning ){ Returning *pRet = pParse->u1.pReturning; - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); + if( NEVER(pRet->nRetCol==0) ){ + assert( CORRUPT_DB ); + }else{ + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); + } } /* Finally, jump back to the beginning of the executable code. */ @@ -112686,7 +114221,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ /* Get the VDBE program ready for execution */ - if( v && pParse->nErr==0 && !db->mallocFailed ){ + assert( v!=0 || pParse->nErr ); + assert( db->mallocFailed==0 || pParse->nErr ); + if( pParse->nErr==0 ){ /* A minimum of one cursor is required if autoincrement is used * See ticket [a696379c1f08866] */ assert( pParse->pAinc==0 || pParse->nTab>0 ); @@ -112700,20 +114237,21 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ /* ** Run the parser and code generator recursively in order to generate ** code for the SQL statement given onto the end of the pParse context -** currently under construction. When the parser is run recursively -** this way, the final OP_Halt is not appended and other initialization -** and finalization steps are omitted because those are handling by the -** outermost parser. +** currently under construction. Notes: ** -** Not everything is nestable. This facility is designed to permit -** INSERT, UPDATE, and DELETE operations against the schema table. Use -** care if you decide to try to use this routine for some other purposes. +** * The final OP_Halt is not appended and other initialization +** and finalization steps are omitted because those are handling by the +** outermost parser. +** +** * Built-in SQL functions always take precedence over application-defined +** SQL functions. In other words, it is not possible to override a +** built-in function. 
*/ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ va_list ap; char *zSql; - char *zErrMsg = 0; sqlite3 *db = pParse->db; + u32 savedDbFlags = db->mDbFlags; char saveBuf[PARSE_TAIL_SZ]; if( pParse->nErr ) return; @@ -112732,8 +114270,11 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ pParse->nested++; memcpy(saveBuf, PARSE_TAIL(pParse), PARSE_TAIL_SZ); memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ); - sqlite3RunParser(pParse, zSql, &zErrMsg); - sqlite3DbFree(db, zErrMsg); + db->mDbFlags |= DBFLAG_PreferBuiltin; + sqlite3RunParser(pParse, zSql); + sqlite3DbFree(db, pParse->zErrMsg); + pParse->zErrMsg = 0; + db->mDbFlags = savedDbFlags; sqlite3DbFree(db, zSql); memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ); pParse->nested--; @@ -112790,17 +114331,17 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, zName); if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){ if( i==1 ){ - if( sqlite3StrICmp(zName+7, &ALT_TEMP_SCHEMA_TABLE[7])==0 - || sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0 - || sqlite3StrICmp(zName+7, &DFLT_SCHEMA_TABLE[7])==0 + if( sqlite3StrICmp(zName+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 + || sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 + || sqlite3StrICmp(zName+7, &LEGACY_SCHEMA_TABLE[7])==0 ){ p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, - DFLT_TEMP_SCHEMA_TABLE); + LEGACY_TEMP_SCHEMA_TABLE); } }else{ - if( sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0 ){ + if( sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 ){ p = sqlite3HashFind(&db->aDb[i].pSchema->tblHash, - DFLT_SCHEMA_TABLE); + LEGACY_SCHEMA_TABLE); } } } @@ -112818,11 +114359,11 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha if( p ) break; } if( p==0 && sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){ - if( sqlite3StrICmp(zName+7, &ALT_SCHEMA_TABLE[7])==0 ){ - p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, DFLT_SCHEMA_TABLE); - }else if( sqlite3StrICmp(zName+7, &ALT_TEMP_SCHEMA_TABLE[7])==0 ){ + if( sqlite3StrICmp(zName+7, &PREFERRED_SCHEMA_TABLE[7])==0 ){ + p = sqlite3HashFind(&db->aDb[0].pSchema->tblHash, LEGACY_SCHEMA_TABLE); + }else if( sqlite3StrICmp(zName+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ p = sqlite3HashFind(&db->aDb[1].pSchema->tblHash, - DFLT_TEMP_SCHEMA_TABLE); + LEGACY_TEMP_SCHEMA_TABLE); } } } @@ -112868,6 +114409,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( pMod = sqlite3PragmaVtabRegister(db, zName); } if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){ + testcase( pMod->pEpoTab==0 ); return pMod->pEpoTab; } } @@ -112917,6 +114459,22 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem( return sqlite3LocateTable(pParse, flags, p->zName, zDb); } +/* +** Return the preferred table name for system tables. Translate legacy +** names into the new preferred names, as appropriate. 
+*/ +SQLITE_PRIVATE const char *sqlite3PreferredTableName(const char *zName){ + if( sqlite3StrNICmp(zName, "sqlite_", 7)==0 ){ + if( sqlite3StrICmp(zName+7, &LEGACY_SCHEMA_TABLE[7])==0 ){ + return PREFERRED_SCHEMA_TABLE; + } + if( sqlite3StrICmp(zName+7, &LEGACY_TEMP_SCHEMA_TABLE[7])==0 ){ + return PREFERRED_TEMP_SCHEMA_TABLE; + } + } + return zName; +} + /* ** Locate the in-memory structure that describes ** a particular index given the name of that index @@ -113081,6 +114639,84 @@ SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){ db->mDbFlags &= ~DBFLAG_SchemaChange; } +/* +** Set the expression associated with a column. This is usually +** the DEFAULT value, but might also be the expression that computes +** the value for a generated column. +*/ +SQLITE_PRIVATE void sqlite3ColumnSetExpr( + Parse *pParse, /* Parsing context */ + Table *pTab, /* The table containing the column */ + Column *pCol, /* The column to receive the new DEFAULT expression */ + Expr *pExpr /* The new default expression */ +){ + ExprList *pList; + assert( IsOrdinaryTable(pTab) ); + pList = pTab->u.tab.pDfltList; + if( pCol->iDflt==0 + || NEVER(pList==0) + || NEVER(pList->nExpriDflt) + ){ + pCol->iDflt = pList==0 ? 1 : pList->nExpr+1; + pTab->u.tab.pDfltList = sqlite3ExprListAppend(pParse, pList, pExpr); + }else{ + sqlite3ExprDelete(pParse->db, pList->a[pCol->iDflt-1].pExpr); + pList->a[pCol->iDflt-1].pExpr = pExpr; + } +} + +/* +** Return the expression associated with a column. The expression might be +** the DEFAULT clause or the AS clause of a generated column. +** Return NULL if the column has no associated expression. +*/ +SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){ + if( pCol->iDflt==0 ) return 0; + if( NEVER(!IsOrdinaryTable(pTab)) ) return 0; + if( NEVER(pTab->u.tab.pDfltList==0) ) return 0; + if( NEVER(pTab->u.tab.pDfltList->nExpriDflt) ) return 0; + return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr; +} + +/* +** Set the collating sequence name for a column. +*/ +SQLITE_PRIVATE void sqlite3ColumnSetColl( + sqlite3 *db, + Column *pCol, + const char *zColl +){ + i64 nColl; + i64 n; + char *zNew; + assert( zColl!=0 ); + n = sqlite3Strlen30(pCol->zCnName) + 1; + if( pCol->colFlags & COLFLAG_HASTYPE ){ + n += sqlite3Strlen30(pCol->zCnName+n) + 1; + } + nColl = sqlite3Strlen30(zColl) + 1; + zNew = sqlite3DbRealloc(db, pCol->zCnName, nColl+n); + if( zNew ){ + pCol->zCnName = zNew; + memcpy(pCol->zCnName + n, zColl, nColl); + pCol->colFlags |= COLFLAG_HASCOLL; + } +} + +/* +** Return the collating squence name for a column +*/ +SQLITE_PRIVATE const char *sqlite3ColumnColl(Column *pCol){ + const char *z; + if( (pCol->colFlags & COLFLAG_HASCOLL)==0 ) return 0; + z = pCol->zCnName; + while( *z ){ z++; } + if( pCol->colFlags & COLFLAG_HASTYPE ){ + do{ z++; }while( *z ); + } + return z+1; +} + /* ** Delete memory allocated for the column names of a table or view (the ** Table.aCol[] array). 
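sqlite3FindTable and the new sqlite3PreferredTableName helper above translate between the legacy and preferred spellings of the schema table, so both names resolve to the same underlying object. A small sketch under the same assumed driver setup as the previous example:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE demo (x INTEGER)`); err != nil {
		log.Fatal(err)
	}

	// The legacy and preferred schema-table names reach the same table.
	for _, name := range []string{"sqlite_master", "sqlite_schema"} {
		var n int
		if err := db.QueryRow(`SELECT count(*) FROM ` + name + ` WHERE type = 'table'`).Scan(&n); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s reports %d table(s)\n", name, n)
	}
}
```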
@@ -113091,12 +114727,20 @@ SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){ assert( pTable!=0 ); if( (pCol = pTable->aCol)!=0 ){ for(i=0; inCol; i++, pCol++){ - assert( pCol->zName==0 || pCol->hName==sqlite3StrIHash(pCol->zName) ); - sqlite3DbFree(db, pCol->zName); - sqlite3ExprDelete(db, pCol->pDflt); - sqlite3DbFree(db, pCol->zColl); + assert( pCol->zCnName==0 || pCol->hName==sqlite3StrIHash(pCol->zCnName) ); + sqlite3DbFree(db, pCol->zCnName); } sqlite3DbFree(db, pTable->aCol); + if( IsOrdinaryTable(pTable) ){ + sqlite3ExprListDelete(db, pTable->u.tab.pDfltList); + } + if( db==0 || db->pnBytesFreed==0 ){ + pTable->aCol = 0; + pTable->nCol = 0; + if( IsOrdinaryTable(pTable) ){ + pTable->u.tab.pDfltList = 0; + } + } } } @@ -113148,19 +114792,25 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ sqlite3FreeIndex(db, pIndex); } - /* Delete any foreign keys attached to this table. */ - sqlite3FkDelete(db, pTable); + if( IsOrdinaryTable(pTable) ){ + sqlite3FkDelete(db, pTable); + } +#ifndef SQLITE_OMIT_VIRTUAL_TABLE + else if( IsVirtual(pTable) ){ + sqlite3VtabClear(db, pTable); + } +#endif + else{ + assert( IsView(pTable) ); + sqlite3SelectDelete(db, pTable->u.view.pSelect); + } /* Delete the Table structure itself. */ sqlite3DeleteColumnNames(db, pTable); sqlite3DbFree(db, pTable->zName); sqlite3DbFree(db, pTable->zColAff); - sqlite3SelectDelete(db, pTable->pSelect); sqlite3ExprListDelete(db, pTable->pCheck); -#ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3VtabClear(db, pTable); -#endif sqlite3DbFree(db, pTable); /* Verify that no lookaside memory was used by schema tables */ @@ -113206,10 +114856,10 @@ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char ** are not \000 terminated and are not persistent. The returned string ** is \000 terminated and is persistent. */ -SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ +SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, const Token *pName){ char *zName; if( pName ){ - zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n); + zName = sqlite3DbStrNDup(db, (const char*)pName->z, pName->n); sqlite3Dequote(zName); }else{ zName = 0; @@ -113223,7 +114873,7 @@ SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ */ SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *p, int iDb){ Vdbe *v = sqlite3GetVdbe(p); - sqlite3TableLock(p, iDb, SCHEMA_ROOT, 1, DFLT_SCHEMA_TABLE); + sqlite3TableLock(p, iDb, SCHEMA_ROOT, 1, LEGACY_SCHEMA_TABLE); sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, SCHEMA_ROOT, iDb, 5); if( p->nTab==0 ){ p->nTab = 1; @@ -113584,7 +115234,8 @@ SQLITE_PRIVATE void sqlite3StartTable( pTable = sqlite3FindTable(db, zName, zDb); if( pTable ){ if( !noErr ){ - sqlite3ErrorMsg(pParse, "table %T already exists", pName); + sqlite3ErrorMsg(pParse, "%s %T already exists", + (IsView(pTable)? 
"view" : "table"), pName); }else{ assert( !db->init.busy || CORRUPT_DB ); sqlite3CodeVerifySchema(pParse, iDb); @@ -113686,6 +115337,7 @@ SQLITE_PRIVATE void sqlite3StartTable( /* If an error occurs, we jump here */ begin_table_error: + pParse->checkSchema = 1; sqlite3DbFree(db, zName); return; } @@ -113695,7 +115347,7 @@ SQLITE_PRIVATE void sqlite3StartTable( */ #if SQLITE_ENABLE_HIDDEN_COLUMNS SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){ - if( sqlite3_strnicmp(pCol->zName, "__hidden__", 10)==0 ){ + if( sqlite3_strnicmp(pCol->zCnName, "__hidden__", 10)==0 ){ pCol->colFlags |= COLFLAG_HIDDEN; if( pTab ) pTab->tabFlags |= TF_HasHidden; }else if( pTab && pCol!=pTab->aCol && (pCol[-1].colFlags & COLFLAG_HIDDEN) ){ @@ -113786,7 +115438,7 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ ** first to get things going. Then this routine is called for each ** column. */ -SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ +SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ Table *p; int i; char *z; @@ -113794,55 +115446,96 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ Column *pCol; sqlite3 *db = pParse->db; u8 hName; + Column *aNew; + u8 eType = COLTYPE_CUSTOM; + u8 szEst = 1; + char affinity = SQLITE_AFF_BLOB; if( (p = pParse->pNewTable)==0 ) return; if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); return; } - z = sqlite3DbMallocRaw(db, pName->n + pType->n + 2); + if( !IN_RENAME_OBJECT ) sqlite3DequoteToken(&sName); + + /* Because keywords GENERATE ALWAYS can be converted into indentifiers + ** by the parser, we can sometimes end up with a typename that ends + ** with "generated always". Check for this case and omit the surplus + ** text. */ + if( sType.n>=16 + && sqlite3_strnicmp(sType.z+(sType.n-6),"always",6)==0 + ){ + sType.n -= 6; + while( ALWAYS(sType.n>0) && sqlite3Isspace(sType.z[sType.n-1]) ) sType.n--; + if( sType.n>=9 + && sqlite3_strnicmp(sType.z+(sType.n-9),"generated",9)==0 + ){ + sType.n -= 9; + while( sType.n>0 && sqlite3Isspace(sType.z[sType.n-1]) ) sType.n--; + } + } + + /* Check for standard typenames. For standard typenames we will + ** set the Column.eType field rather than storing the typename after + ** the column name, in order to save space. 
*/ + if( sType.n>=3 ){ + sqlite3DequoteToken(&sType); + for(i=0; i0) ); if( z==0 ) return; - if( IN_RENAME_OBJECT ) sqlite3RenameTokenMap(pParse, (void*)z, pName); - memcpy(z, pName->z, pName->n); - z[pName->n] = 0; + if( IN_RENAME_OBJECT ) sqlite3RenameTokenMap(pParse, (void*)z, &sName); + memcpy(z, sName.z, sName.n); + z[sName.n] = 0; sqlite3Dequote(z); hName = sqlite3StrIHash(z); for(i=0; inCol; i++){ - if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zName)==0 ){ + if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zCnName)==0 ){ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); sqlite3DbFree(db, z); return; } } - if( (p->nCol & 0x7)==0 ){ - Column *aNew; - aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0])); - if( aNew==0 ){ - sqlite3DbFree(db, z); - return; - } - p->aCol = aNew; + aNew = sqlite3DbRealloc(db,p->aCol,((i64)p->nCol+1)*sizeof(p->aCol[0])); + if( aNew==0 ){ + sqlite3DbFree(db, z); + return; } + p->aCol = aNew; pCol = &p->aCol[p->nCol]; memset(pCol, 0, sizeof(p->aCol[0])); - pCol->zName = z; + pCol->zCnName = z; pCol->hName = hName; sqlite3ColumnPropertiesFromName(p, pCol); - if( pType->n==0 ){ + if( sType.n==0 ){ /* If there is no type specified, columns have the default affinity ** 'BLOB' with a default size of 4 bytes. */ - pCol->affinity = SQLITE_AFF_BLOB; - pCol->szEst = 1; + pCol->affinity = affinity; + pCol->eCType = eType; + pCol->szEst = szEst; #ifdef SQLITE_ENABLE_SORTER_REFERENCES - if( 4>=sqlite3GlobalConfig.szSorterRef ){ - pCol->colFlags |= COLFLAG_SORTERREF; + if( affinity==SQLITE_AFF_BLOB ){ + if( 4>=sqlite3GlobalConfig.szSorterRef ){ + pCol->colFlags |= COLFLAG_SORTERREF; + } } #endif }else{ zType = z + sqlite3Strlen30(z) + 1; - memcpy(zType, pType->z, pType->n); - zType[pType->n] = 0; + memcpy(zType, sType.z, sType.n); + zType[sType.n] = 0; sqlite3Dequote(zType); pCol->affinity = sqlite3AffinityType(zType, pCol); pCol->colFlags |= COLFLAG_HASTYPE; @@ -113997,7 +115690,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue( pCol = &(p->aCol[p->nCol-1]); if( !sqlite3ExprIsConstantOrFunction(pExpr, isInit) ){ sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant", - pCol->zName); + pCol->zCnName); #ifndef SQLITE_OMIT_GENERATED_COLUMNS }else if( pCol->colFlags & COLFLAG_GENERATED ){ testcase( pCol->colFlags & COLFLAG_VIRTUAL ); @@ -114008,15 +115701,15 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue( /* A copy of pExpr is used instead of the original, as pExpr contains ** tokens that point to volatile memory. 
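The column-definition hunks around here route column expressions through the new per-table expression list: sqlite3AddDefaultValue (just below) stores DEFAULT clauses via sqlite3ColumnSetExpr, and sqlite3AddGenerated (further down) does the same for GENERATED ALWAYS AS columns. A hypothetical SQL-level illustration of both, with made-up table and column names and the same assumed Go driver setup:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One DEFAULT expression and one generated column; both expressions end
	// up in the table's default-expression list on the C side.
	_, err = db.Exec(`
		CREATE TABLE price (
			id       INTEGER PRIMARY KEY,
			net      REAL NOT NULL,
			currency TEXT DEFAULT 'USD',
			gross    REAL GENERATED ALWAYS AS (net * 1.1) VIRTUAL
		)`)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := db.Exec(`INSERT INTO price (net) VALUES (100.0)`); err != nil {
		log.Fatal(err)
	}

	var currency string
	var gross float64
	if err := db.QueryRow(`SELECT currency, gross FROM price`).Scan(&currency, &gross); err != nil {
		log.Fatal(err)
	}
	fmt.Println(currency, gross) // the defaulted currency and the computed gross
}
```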
*/ - Expr x; - sqlite3ExprDelete(db, pCol->pDflt); + Expr x, *pDfltExpr; memset(&x, 0, sizeof(x)); x.op = TK_SPAN; x.u.zToken = sqlite3DbSpanDup(db, zStart, zEnd); x.pLeft = pExpr; x.flags = EP_Skip; - pCol->pDflt = sqlite3ExprDup(db, &x, EXPRDUP_REDUCE); + pDfltExpr = sqlite3ExprDup(db, &x, EXPRDUP_REDUCE); sqlite3DbFree(db, x.u.zToken); + sqlite3ColumnSetExpr(pParse, p, pCol, pDfltExpr); } } if( IN_RENAME_OBJECT ){ @@ -114112,9 +115805,11 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey( assert( pCExpr!=0 ); sqlite3StringToId(pCExpr); if( pCExpr->op==TK_ID ){ - const char *zCName = pCExpr->u.zToken; + const char *zCName; + assert( !ExprHasProperty(pCExpr, EP_IntValue) ); + zCName = pCExpr->u.zToken; for(iCol=0; iColnCol; iCol++){ - if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zName)==0 ){ + if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zCnName)==0 ){ pCol = &pTab->aCol[iCol]; makeColumnPartOfPrimaryKey(pParse, pCol); break; @@ -114125,7 +115820,7 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey( } if( nTerm==1 && pCol - && sqlite3StrICmp(sqlite3ColumnType(pCol,""), "INTEGER")==0 + && pCol->eCType==COLTYPE_INTEGER && sortOrder!=SQLITE_SO_DESC ){ if( IN_RENAME_OBJECT && pList ){ @@ -114205,8 +115900,7 @@ SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){ if( sqlite3LocateCollSeq(pParse, zColl) ){ Index *pIdx; - sqlite3DbFree(db, p->aCol[i].zColl); - p->aCol[i].zColl = zColl; + sqlite3ColumnSetColl(db, &p->aCol[i], zColl); /* If the column is declared as " PRIMARY KEY COLLATE ", ** then an index may have been created on this column before the @@ -114215,12 +115909,11 @@ SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){ for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ assert( pIdx->nKeyCol==1 ); if( pIdx->aiColumn[0]==i ){ - pIdx->azColl[0] = p->aCol[i].zColl; + pIdx->azColl[0] = sqlite3ColumnColl(&p->aCol[i]); } } - }else{ - sqlite3DbFree(db, zColl); } + sqlite3DbFree(db, zColl); } /* Change the most recently parsed column to be a GENERATED ALWAYS AS @@ -114240,7 +115933,7 @@ SQLITE_PRIVATE void sqlite3AddGenerated(Parse *pParse, Expr *pExpr, Token *pType sqlite3ErrorMsg(pParse, "virtual tables cannot use computed columns"); goto generated_done; } - if( pCol->pDflt ) goto generated_error; + if( pCol->iDflt>0 ) goto generated_error; if( pType ){ if( pType->n==7 && sqlite3StrNICmp("virtual",pType->z,7)==0 ){ /* no-op */ @@ -114258,13 +115951,13 @@ SQLITE_PRIVATE void sqlite3AddGenerated(Parse *pParse, Expr *pExpr, Token *pType if( pCol->colFlags & COLFLAG_PRIMKEY ){ makeColumnPartOfPrimaryKey(pParse, pCol); /* For the error message */ } - pCol->pDflt = pExpr; + sqlite3ColumnSetExpr(pParse, pTab, pCol, pExpr); pExpr = 0; goto generated_done; generated_error: sqlite3ErrorMsg(pParse, "error in generated column \"%s\"", - pCol->zName); + pCol->zCnName); generated_done: sqlite3ExprDelete(pParse->db, pExpr); #else @@ -114366,7 +116059,7 @@ static char *createTableStmt(sqlite3 *db, Table *p){ Column *pCol; n = 0; for(pCol = p->aCol, i=0; inCol; i++, pCol++){ - n += identLength(pCol->zName) + 5; + n += identLength(pCol->zCnName) + 5; } n += identLength(p->zName); if( n<50 ){ @@ -114402,7 +116095,7 @@ static char *createTableStmt(sqlite3 *db, Table *p){ sqlite3_snprintf(n-k, &zStmt[k], zSep); k += sqlite3Strlen30(&zStmt[k]); zSep = zSep2; - identPut(zStmt, &k, pCol->zName); + identPut(zStmt, &k, pCol->zCnName); assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 ); assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) ); testcase( pCol->affinity==SQLITE_AFF_BLOB ); @@ 
-114486,7 +116179,6 @@ static void estimateIndexWidth(Index *pIdx){ */ static int hasColumn(const i16 *aiCol, int nCol, int x){ while( nCol-- > 0 ){ - assert( aiCol[0]>=0 ); if( x==*(aiCol++) ){ return 1; } @@ -114599,7 +116291,9 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ */ if( !db->init.imposterTable ){ for(i=0; inCol; i++){ - if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){ + if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 + && (pTab->aCol[i].notNull==OE_None) + ){ pTab->aCol[i].notNull = OE_Abort; } } @@ -114621,7 +116315,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ if( pTab->iPKey>=0 ){ ExprList *pList; Token ipkToken; - sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zName); + sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zCnName); pList = sqlite3ExprListAppend(pParse, 0, sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0)); if( pList==0 ){ @@ -114636,10 +116330,11 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ pTab->iPKey = -1; sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0, SQLITE_IDXTYPE_PRIMARYKEY); - if( db->mallocFailed || pParse->nErr ){ + if( pParse->nErr ){ pTab->tabFlags &= ~TF_WithoutRowid; return; } + assert( db->mallocFailed==0 ); pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk->nKeyCol==1 ); }else{ @@ -114751,7 +116446,7 @@ SQLITE_PRIVATE int sqlite3IsShadowTableOf(sqlite3 *db, Table *pTab, const char * nName = sqlite3Strlen30(pTab->zName); if( sqlite3_strnicmp(zName, pTab->zName, nName)!=0 ) return 0; if( zName[nName]!='_' ) return 0; - pMod = (Module*)sqlite3HashFind(&db->aModule, pTab->azModuleArg[0]); + pMod = (Module*)sqlite3HashFind(&db->aModule, pTab->u.vtab.azArg[0]); if( pMod==0 ) return 0; if( pMod->pModule->iVersion<3 ) return 0; if( pMod->pModule->xShadowName==0 ) return 0; @@ -114759,6 +116454,41 @@ SQLITE_PRIVATE int sqlite3IsShadowTableOf(sqlite3 *db, Table *pTab, const char * } #endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Table pTab is a virtual table. If it the virtual table implementation +** exists and has an xShadowName method, then loop over all other ordinary +** tables within the same schema looking for shadow tables of pTab, and mark +** any shadow tables seen using the TF_Shadow flag. 
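convertToWithoutRowidTable above now adds the implicit NOT NULL (OE_Abort) only to PRIMARY KEY columns that have no explicit conflict action of their own. The observable effect, sketched under the same assumptions as the earlier examples, is that a NULL key in a WITHOUT ROWID table is rejected even though no NOT NULL was declared:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The PRIMARY KEY column of a WITHOUT ROWID table picks up an implicit
	// NOT NULL constraint.
	_, err = db.Exec(`
		CREATE TABLE kv (
			k TEXT PRIMARY KEY,
			v TEXT
		) WITHOUT ROWID`)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := db.Exec(`INSERT INTO kv (k, v) VALUES (NULL, 'x')`); err != nil {
		fmt.Println("rejected:", err) // NOT NULL constraint failed: kv.k
	}
}
```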
+*/ +SQLITE_PRIVATE void sqlite3MarkAllShadowTablesOf(sqlite3 *db, Table *pTab){ + int nName; /* Length of pTab->zName */ + Module *pMod; /* Module for the virtual table */ + HashElem *k; /* For looping through the symbol table */ + + assert( IsVirtual(pTab) ); + pMod = (Module*)sqlite3HashFind(&db->aModule, pTab->u.vtab.azArg[0]); + if( pMod==0 ) return; + if( NEVER(pMod->pModule==0) ) return; + if( pMod->pModule->iVersion<3 ) return; + if( pMod->pModule->xShadowName==0 ) return; + assert( pTab->zName!=0 ); + nName = sqlite3Strlen30(pTab->zName); + for(k=sqliteHashFirst(&pTab->pSchema->tblHash); k; k=sqliteHashNext(k)){ + Table *pOther = sqliteHashData(k); + assert( pOther->zName!=0 ); + if( !IsOrdinaryTable(pOther) ) continue; + if( pOther->tabFlags & TF_Shadow ) continue; + if( sqlite3StrNICmp(pOther->zName, pTab->zName, nName)==0 + && pOther->zName[nName]=='_' + && pMod->pModule->xShadowName(pOther->zName+nName+1) + ){ + pOther->tabFlags |= TF_Shadow; + } + } +} +#endif /* ifndef SQLITE_OMIT_VIRTUALTABLE */ + #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Return true if zName is a shadow table name in the current database @@ -114832,7 +116562,7 @@ SQLITE_PRIVATE void sqlite3EndTable( Parse *pParse, /* Parse context */ Token *pCons, /* The ',' token after the last column defn. */ Token *pEnd, /* The ')' before options in the CREATE TABLE */ - u8 tabOpts, /* Extra table options. Usually 0. */ + u32 tabOpts, /* Extra table options. Usually 0. */ Select *pSelect /* Select from a "CREATE ... AS SELECT" */ ){ Table *p; /* The new table */ @@ -114860,7 +116590,7 @@ SQLITE_PRIVATE void sqlite3EndTable( ** table itself. So mark it read-only. */ if( db->init.busy ){ - if( pSelect ){ + if( pSelect || (!IsOrdinaryTable(p) && db->init.newTnum) ){ sqlite3ErrorMsg(pParse, ""); return; } @@ -114868,6 +116598,44 @@ SQLITE_PRIVATE void sqlite3EndTable( if( p->tnum==1 ) p->tabFlags |= TF_Readonly; } + /* Special processing for tables that include the STRICT keyword: + ** + ** * Do not allow custom column datatypes. Every column must have + ** a datatype that is one of INT, INTEGER, REAL, TEXT, or BLOB. + ** + ** * If a PRIMARY KEY is defined, other than the INTEGER PRIMARY KEY, + ** then all columns of the PRIMARY KEY must have a NOT NULL + ** constraint. 
+ */ + if( tabOpts & TF_Strict ){ + int ii; + p->tabFlags |= TF_Strict; + for(ii=0; iinCol; ii++){ + Column *pCol = &p->aCol[ii]; + if( pCol->eCType==COLTYPE_CUSTOM ){ + if( pCol->colFlags & COLFLAG_HASTYPE ){ + sqlite3ErrorMsg(pParse, + "unknown datatype for %s.%s: \"%s\"", + p->zName, pCol->zCnName, sqlite3ColumnType(pCol, "") + ); + }else{ + sqlite3ErrorMsg(pParse, "missing datatype for %s.%s", + p->zName, pCol->zCnName); + } + return; + }else if( pCol->eCType==COLTYPE_ANY ){ + pCol->affinity = SQLITE_AFF_BLOB; + } + if( (pCol->colFlags & COLFLAG_PRIMKEY)!=0 + && p->iPKey!=ii + && pCol->notNull == OE_None + ){ + pCol->notNull = OE_Abort; + p->tabFlags |= TF_HasNotNull; + } + } + } + assert( (p->tabFlags & TF_HasPrimaryKey)==0 || p->iPKey>=0 || sqlite3PrimaryKeyIndex(p)!=0 ); assert( (p->tabFlags & TF_HasPrimaryKey)!=0 @@ -114912,7 +116680,7 @@ SQLITE_PRIVATE void sqlite3EndTable( for(ii=0; iinCol; ii++){ u32 colFlags = p->aCol[ii].colFlags; if( (colFlags & COLFLAG_GENERATED)!=0 ){ - Expr *pX = p->aCol[ii].pDflt; + Expr *pX = sqlite3ColumnExpr(p, &p->aCol[ii]); testcase( colFlags & COLFLAG_VIRTUAL ); testcase( colFlags & COLFLAG_STORED ); if( sqlite3ResolveSelfReference(pParse, p, NC_GenCol, pX, 0) ){ @@ -114922,8 +116690,8 @@ SQLITE_PRIVATE void sqlite3EndTable( ** tree that have been allocated from lookaside memory, which is ** illegal in a schema and will lead to errors or heap corruption ** when the database connection closes. */ - sqlite3ExprDelete(db, pX); - p->aCol[ii].pDflt = sqlite3ExprAlloc(db, TK_NULL, 0, 0); + sqlite3ColumnSetExpr(pParse, p, &p->aCol[ii], + sqlite3ExprAlloc(db, TK_NULL, 0, 0)); } }else{ nNG++; @@ -114963,7 +116731,7 @@ SQLITE_PRIVATE void sqlite3EndTable( /* ** Initialize zType for the new view or table. */ - if( p->pSelect==0 ){ + if( IsOrdinaryTable(p) ){ /* A regular table */ zType = "table"; zType2 = "TABLE"; @@ -114997,6 +116765,11 @@ SQLITE_PRIVATE void sqlite3EndTable( int addrInsLoop; /* Top of the loop for inserting rows */ Table *pSelTab; /* A table that describes the SELECT results */ + if( IN_SPECIAL_PARSE ){ + pParse->rc = SQLITE_ERROR; + pParse->nErr++; + return; + } regYield = ++pParse->nMem; regRec = ++pParse->nMem; regRowid = ++pParse->nMem; @@ -115049,7 +116822,7 @@ SQLITE_PRIVATE void sqlite3EndTable( ** the information we've collected. */ sqlite3NestedParse(pParse, - "UPDATE %Q." DFLT_SCHEMA_TABLE + "UPDATE %Q." LEGACY_SCHEMA_TABLE " SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q" " WHERE rowid=#%d", db->aDb[iDb].zDbSName, @@ -115113,12 +116886,12 @@ SQLITE_PRIVATE void sqlite3EndTable( } #ifndef SQLITE_OMIT_ALTERTABLE - if( !pSelect && !p->pSelect ){ + if( !pSelect && IsOrdinaryTable(p) ){ assert( pCons && pEnd ); if( pCons->z==0 ){ pCons = pEnd; } - p->addColOffset = 13 + (int)(pCons->z - pParse->sNameToken.z); + p->u.tab.addColOffset = 13 + (int)(pCons->z - pParse->sNameToken.z); } #endif } @@ -115175,12 +116948,13 @@ SQLITE_PRIVATE void sqlite3CreateView( */ pSelect->selFlags |= SF_View; if( IN_RENAME_OBJECT ){ - p->pSelect = pSelect; + p->u.view.pSelect = pSelect; pSelect = 0; }else{ - p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); + p->u.view.pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); } p->pCheck = sqlite3ExprListDup(db, pCNames, EXPRDUP_REDUCE); + p->eTabType = TABTYP_VIEW; if( db->mallocFailed ) goto create_view_fail; /* Locate the end of the CREATE VIEW statement. 
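The block above implements the STRICT table option: every column must carry one of the recognized datatypes, ANY columns fall back to BLOB affinity, and PRIMARY KEY columns other than the INTEGER PRIMARY KEY pick up NOT NULL. A rough Go sketch of the visible effects, again with hypothetical names and the assumed driver; the bundled SQLite must be new enough to accept STRICT, as these hunks indicate it is:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Every column of a STRICT table must have one of the recognized datatypes.
	_, err = db.Exec(`
		CREATE TABLE readings (
			id    INTEGER PRIMARY KEY,
			value REAL,
			note  TEXT
		) STRICT`)
	if err != nil {
		log.Fatal(err)
	}

	// Values that cannot be coerced to the declared type are rejected instead
	// of being stored with whatever type they arrived as.
	if _, err := db.Exec(`INSERT INTO readings (value, note) VALUES ('not a number', 'oops')`); err != nil {
		fmt.Println("type error:", err)
	}

	// A column with no datatype fails at CREATE time ("missing datatype for bad.x").
	if _, err := db.Exec(`CREATE TABLE bad (x) STRICT`); err != nil {
		fmt.Println("schema error:", err)
	}
}
```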
Make sEnd point to @@ -115234,13 +117008,12 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable ); #ifndef SQLITE_OMIT_VIRTUALTABLE - db->nSchemaLock++; - rc = sqlite3VtabCallConnect(pParse, pTable); - db->nSchemaLock--; - if( rc ){ - return 1; + if( IsVirtual(pTable) ){ + db->nSchemaLock++; + rc = sqlite3VtabCallConnect(pParse, pTable); + db->nSchemaLock--; + return rc; } - if( IsVirtual(pTable) ) return 0; #endif #ifndef SQLITE_OMIT_VIEW @@ -115277,8 +117050,8 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ ** to be permanent. So the computation is done on a copy of the SELECT ** statement that defines the view. */ - assert( pTable->pSelect ); - pSel = sqlite3SelectDup(db, pTable->pSelect, 0); + assert( IsView(pTable) ); + pSel = sqlite3SelectDup(db, pTable->u.view.pSelect, 0); if( pSel ){ u8 eParseMode = pParse->eParseMode; pParse->eParseMode = PARSE_MODE_NORMAL; @@ -115307,10 +117080,10 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ */ sqlite3ColumnsFromExprList(pParse, pTable->pCheck, &pTable->nCol, &pTable->aCol); - if( db->mallocFailed==0 - && pParse->nErr==0 + if( pParse->nErr==0 && pTable->nCol==pSel->pEList->nExpr ){ + assert( db->mallocFailed==0 ); sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel, SQLITE_AFF_NONE); } @@ -115337,8 +117110,6 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ pTable->pSchema->schemaFlags |= DB_UnresetViews; if( db->mallocFailed ){ sqlite3DeleteColumnNames(db, pTable); - pTable->aCol = 0; - pTable->nCol = 0; } #endif /* SQLITE_OMIT_VIEW */ return nErr; @@ -115355,10 +117126,8 @@ static void sqliteViewResetAll(sqlite3 *db, int idx){ if( !DbHasProperty(db, idx, DB_UnresetViews) ) return; for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); - if( pTab->pSelect ){ + if( IsView(pTab) ){ sqlite3DeleteColumnNames(db, pTab); - pTab->aCol = 0; - pTab->nCol = 0; } } DbClearProperty(db, idx, DB_UnresetViews); @@ -115432,7 +117201,7 @@ static void destroyRootPage(Parse *pParse, int iTable, int iDb){ ** token for additional information. */ sqlite3NestedParse(pParse, - "UPDATE %Q." DFLT_SCHEMA_TABLE + "UPDATE %Q." LEGACY_SCHEMA_TABLE " SET rootpage=%d WHERE #%d AND rootpage=#%d", pParse->db->aDb[iDb].zDbSName, iTable, r1, r1); #endif @@ -115567,7 +117336,7 @@ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, in ** database. */ sqlite3NestedParse(pParse, - "DELETE FROM %Q." DFLT_SCHEMA_TABLE + "DELETE FROM %Q." LEGACY_SCHEMA_TABLE " WHERE tbl_name=%Q and type!='trigger'", pDb->zDbSName, pTab->zName); if( !isView && !IsVirtual(pTab) ){ @@ -115595,6 +117364,7 @@ SQLITE_PRIVATE int sqlite3ReadOnlyShadowTables(sqlite3 *db){ if( (db->flags & SQLITE_Defensive)!=0 && db->pVtabCtx==0 && db->nVdbeExec==0 + && !sqlite3VtabInSync(db) ){ return 1; } @@ -115614,6 +117384,9 @@ static int tableMayNotBeDropped(sqlite3 *db, Table *pTab){ if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){ return 1; } + if( pTab->tabFlags & TF_Eponymous ){ + return 1; + } return 0; } @@ -115698,11 +117471,11 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used ** on a table. 
*/ - if( isView && pTab->pSelect==0 ){ + if( isView && !IsView(pTab) ){ sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName); goto exit_drop_table; } - if( !isView && pTab->pSelect ){ + if( !isView && IsView(pTab) ){ sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName); goto exit_drop_table; } @@ -115753,7 +117526,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( FKey *pFKey = 0; FKey *pNextTo; Table *p = pParse->pNewTable; - int nByte; + i64 nByte; int i; int nCol; char *z; @@ -115766,7 +117539,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( if( pToCol && pToCol->nExpr!=1 ){ sqlite3ErrorMsg(pParse, "foreign key on %s" " should reference only one column of table %T", - p->aCol[iCol].zName, pTo); + p->aCol[iCol].zCnName, pTo); goto fk_end; } nCol = 1; @@ -115789,7 +117562,8 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( goto fk_end; } pFKey->pFrom = p; - pFKey->pNextFrom = p->pFKey; + assert( IsOrdinaryTable(p) ); + pFKey->pNextFrom = p->u.tab.pFKey; z = (char*)&pFKey->aCol[nCol]; pFKey->zTo = z; if( IN_RENAME_OBJECT ){ @@ -115806,7 +117580,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( for(i=0; inCol; j++){ - if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zEName)==0 ){ + if( sqlite3StrICmp(p->aCol[j].zCnName, pFromCol->a[i].zEName)==0 ){ pFKey->aCol[i].iFrom = j; break; } @@ -115854,7 +117628,8 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( /* Link the foreign key to the table as the last step. */ - p->pFKey = pFKey; + assert( IsOrdinaryTable(p) ); + p->u.tab.pFKey = pFKey; pFKey = 0; fk_end: @@ -115875,7 +117650,9 @@ SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){ #ifndef SQLITE_OMIT_FOREIGN_KEY Table *pTab; FKey *pFKey; - if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return; + if( (pTab = pParse->pNewTable)==0 ) return; + if( NEVER(!IsOrdinaryTable(pTab)) ) return; + if( (pFKey = pTab->u.tab.pFKey)==0 ) return; assert( isDeferred==0 || isDeferred==1 ); /* EV: R-30323-21917 */ pFKey->isDeferred = (u8)isDeferred; #endif @@ -115925,7 +117702,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ tnum = pIndex->tnum; } pKey = sqlite3KeyInfoOfIndex(pParse, pIndex); - assert( pKey!=0 || db->mallocFailed || pParse->nErr ); + assert( pKey!=0 || pParse->nErr ); /* Open the sorter cursor if we are to use one. 
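sqlite3CreateForeignKey and sqlite3DeferForeignKey above now hang the FKey list off u.tab.pFKey for ordinary tables; the deferred flag is what later lets violations wait until COMMIT. A sketch of a deferred constraint from Go, assuming the same driver and keeping a single connection so the foreign-key pragma stays in effect:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	db.SetMaxOpenConns(1) // keep one connection so the PRAGMA applies everywhere

	if _, err := db.Exec(`PRAGMA foreign_keys = ON`); err != nil {
		log.Fatal(err)
	}
	_, err = db.Exec(`
		CREATE TABLE parent (id INTEGER PRIMARY KEY);
		CREATE TABLE child (
			id        INTEGER PRIMARY KEY,
			parent_id INTEGER REFERENCES parent(id) DEFERRABLE INITIALLY DEFERRED
		);`)
	if err != nil {
		log.Fatal(err)
	}

	// With a deferred constraint the violation only surfaces at COMMIT, when
	// the deferred-constraint counter is consulted.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tx.Exec(`INSERT INTO child (parent_id) VALUES (42)`); err != nil {
		log.Fatal(err) // accepted for now; the check is deferred
	}
	if err := tx.Commit(); err != nil {
		fmt.Println("commit rejected:", err) // FOREIGN KEY constraint failed
	}
}
```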
*/ iSorter = pParse->nTab++; @@ -116089,9 +117866,11 @@ SQLITE_PRIVATE void sqlite3CreateIndex( char *zExtra = 0; /* Extra space after the Index object */ Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */ - if( db->mallocFailed || pParse->nErr>0 ){ + assert( db->pParse==pParse ); + if( pParse->nErr ){ goto exit_create_index; } + assert( db->mallocFailed==0 ); if( IN_DECLARE_VTAB && idxType!=SQLITE_IDXTYPE_PRIMARYKEY ){ goto exit_create_index; } @@ -116155,7 +117934,6 @@ SQLITE_PRIVATE void sqlite3CreateIndex( pDb = &db->aDb[iDb]; assert( pTab!=0 ); - assert( pParse->nErr==0 ); if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 && db->init.busy==0 && pTblName!=0 @@ -116167,7 +117945,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( goto exit_create_index; } #ifndef SQLITE_OMIT_VIEW - if( pTab->pSelect ){ + if( IsView(pTab) ){ sqlite3ErrorMsg(pParse, "views may not be indexed"); goto exit_create_index; } @@ -116258,7 +118036,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( Token prevCol; Column *pCol = &pTab->aCol[pTab->nCol-1]; pCol->colFlags |= COLFLAG_UNIQUE; - sqlite3TokenInit(&prevCol, pCol->zName); + sqlite3TokenInit(&prevCol, pCol->zCnName); pList = sqlite3ExprListAppend(pParse, 0, sqlite3ExprAlloc(db, TK_ID, &prevCol, 0)); if( pList==0 ) goto exit_create_index; @@ -116276,6 +118054,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( Expr *pExpr = pList->a[i].pExpr; assert( pExpr!=0 ); if( pExpr->op==TK_COLLATE ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken)); } } @@ -116371,6 +118150,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( zColl = 0; if( pListItem->pExpr->op==TK_COLLATE ){ int nColl; + assert( !ExprHasProperty(pListItem->pExpr, EP_IntValue) ); zColl = pListItem->pExpr->u.zToken; nColl = sqlite3Strlen30(zColl) + 1; assert( nExtra>=nColl ); @@ -116379,7 +118159,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( zExtra += nColl; nExtra -= nColl; }else if( j>=0 ){ - zColl = pTab->aCol[j].zColl; + zColl = sqlite3ColumnColl(&pTab->aCol[j]); } if( !zColl ) zColl = sqlite3StrBINARY; if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){ @@ -116577,13 +118357,13 @@ SQLITE_PRIVATE void sqlite3CreateIndex( /* Add an entry in sqlite_schema for this index */ sqlite3NestedParse(pParse, - "INSERT INTO %Q." DFLT_SCHEMA_TABLE " VALUES('index',%Q,%Q,#%d,%Q);", - db->aDb[iDb].zDbSName, - pIndex->zName, - pTab->zName, - iMem, - zStmt - ); + "INSERT INTO %Q." LEGACY_SCHEMA_TABLE " VALUES('index',%Q,%Q,#%d,%Q);", + db->aDb[iDb].zDbSName, + pIndex->zName, + pTab->zName, + iMem, + zStmt + ); sqlite3DbFree(db, zStmt); /* Fill the index with data and reparse the schema. Code an OP_Expire @@ -116619,7 +118399,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( ** The list was already ordered when this routine was entered, so at this ** point at most a single index (the newly added index) will be out of ** order. So we have to reorder at most one index. 
*/ - Index **ppFrom = &pTab->pIndex; + Index **ppFrom; Index *pThis; for(ppFrom=&pTab->pIndex; (pThis = *ppFrom)!=0; ppFrom=&pThis->pNext){ Index *pNext; @@ -116717,10 +118497,10 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists sqlite3 *db = pParse->db; int iDb; - assert( pParse->nErr==0 ); /* Never called with prior errors */ if( db->mallocFailed ){ goto exit_drop_index; } + assert( pParse->nErr==0 ); /* Never called with prior non-OOM errors */ assert( pName->nSrc==1 ); if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_drop_index; @@ -116763,7 +118543,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists if( v ){ sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, - "DELETE FROM %Q." DFLT_SCHEMA_TABLE " WHERE name=%Q AND type='index'", + "DELETE FROM %Q." LEGACY_SCHEMA_TABLE " WHERE name=%Q AND type='index'", db->aDb[iDb].zDbSName, pIndex->zName ); sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); @@ -117131,7 +118911,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( pItem->pUsing = pUsing; return p; - append_from_error: +append_from_error: assert( p==0 ); sqlite3ExprDelete(db, pOn); sqlite3IdListDelete(db, pUsing); @@ -117159,6 +118939,7 @@ SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pI }else{ pItem->u1.zIndexedBy = sqlite3NameFromToken(pParse->db, pIndexedBy); pItem->fg.isIndexedBy = 1; + assert( pItem->fg.isCte==0 ); /* No collision on union u2 */ } } } @@ -117471,7 +119252,7 @@ SQLITE_PRIVATE void sqlite3UniqueConstraint( for(j=0; jnKeyCol; j++){ char *zCol; assert( pIdx->aiColumn[j]>=0 ); - zCol = pTab->aCol[pIdx->aiColumn[j]].zName; + zCol = pTab->aCol[pIdx->aiColumn[j]].zCnName; if( j ) sqlite3_str_append(&errMsg, ", ", 2); sqlite3_str_appendall(&errMsg, pTab->zName); sqlite3_str_append(&errMsg, ".", 1); @@ -117498,7 +119279,7 @@ SQLITE_PRIVATE void sqlite3RowidConstraint( int rc; if( pTab->iPKey>=0 ){ zMsg = sqlite3MPrintf(pParse->db, "%s.%s", pTab->zName, - pTab->aCol[pTab->iPKey].zName); + pTab->aCol[pTab->iPKey].zCnName); rc = SQLITE_CONSTRAINT_PRIMARYKEY; }else{ zMsg = sqlite3MPrintf(pParse->db, "%s.rowid", pTab->zName); @@ -118139,6 +119920,7 @@ SQLITE_PRIVATE FuncDef *sqlite3FunctionSearch( ){ FuncDef *p; for(p=sqlite3BuiltinFunctions.a[h]; p; p=p->u.pHash){ + assert( p->funcFlags & SQLITE_FUNC_BUILTIN ); if( sqlite3StrICmp(p->zName, zFunc)==0 ){ return p; } @@ -118159,7 +119941,7 @@ SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs( const char *zName = aDef[i].zName; int nName = sqlite3Strlen30(zName); int h = SQLITE_FUNC_HASH(zName[0], nName); - assert( zName[0]>='a' && zName[0]<='z' ); + assert( aDef[i].funcFlags & SQLITE_FUNC_BUILTIN ); pOther = sqlite3FunctionSearch(h, zName); if( pOther ){ assert( pOther!=&aDef[i] && pOther->pNext!=&aDef[i] ); @@ -118385,6 +120167,16 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ return pTab; } +/* Generate byte-code that will report the number of rows modified +** by a DELETE, INSERT, or UPDATE statement. +*/ +SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char *zColName){ + sqlite3VdbeAddOp0(v, OP_FkCheck); + sqlite3VdbeAddOp2(v, OP_ResultRow, regCounter, 1); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zColName, SQLITE_STATIC); +} + /* Return true if table pTab is read-only. 
** ** A table is read-only if any of the following are true: @@ -118425,7 +120217,7 @@ SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ return 1; } #ifndef SQLITE_OMIT_VIEW - if( !viewOk && pTab->pSelect ){ + if( !viewOk && IsView(pTab) ){ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); return 1; } @@ -118529,13 +120321,13 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); if( pPk->nKeyCol==1 ){ - const char *zName = pTab->aCol[pPk->aiColumn[0]].zName; + const char *zName = pTab->aCol[pPk->aiColumn[0]].zCnName; pLhs = sqlite3Expr(db, TK_ID, zName); pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName)); }else{ int i; for(i=0; inKeyCol; i++){ - Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zName); + Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName); pEList = sqlite3ExprListAppend(pParse, pEList, p); } pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); @@ -118551,6 +120343,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); pSrc->a[0].pTab = pTab; if( pSrc->a[0].fg.isIndexedBy ){ + assert( pSrc->a[0].fg.isCte==0 ); pSrc->a[0].u2.pIBIndex = 0; pSrc->a[0].fg.isIndexedBy = 0; sqlite3DbFree(db, pSrc->a[0].u1.zIndexedBy); @@ -118623,9 +120416,11 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( memset(&sContext, 0, sizeof(sContext)); db = pParse->db; - if( pParse->nErr || db->mallocFailed ){ + assert( db->pParse==pParse ); + if( pParse->nErr ){ goto delete_from_cleanup; } + assert( db->mallocFailed==0 ); assert( pTabList->nSrc==1 ); @@ -118642,7 +120437,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); - isView = pTab->pSelect!=0; + isView = IsView(pTab); #else # define pTrigger 0 # define isView 0 @@ -118806,7 +120601,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( ** ONEPASS_SINGLE: One-pass approach - at most one row deleted. ** ONEPASS_MULTI: One-pass approach - any number of rows may be deleted. */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, wcf, iTabCur+1); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0,0,wcf,iTabCur+1); if( pWInfo==0 ) goto delete_from_cleanup; eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI ); @@ -118892,7 +120687,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( if( eOnePass!=ONEPASS_OFF ){ assert( nKey==nPk ); /* OP_Found will use an unpacked key */ if( !IsVirtual(pTab) && aToOpen[iDataCur-iTabCur] ){ - assert( pPk!=0 || pTab->pSelect!=0 ); + assert( pPk!=0 || IsView(pTab) ); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey); VdbeCoverage(v); } @@ -118959,9 +120754,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( ** invoke the callback function. */ if( memCnt ){ - sqlite3VdbeAddOp2(v, OP_ChngCntRow, memCnt, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); + sqlite3CodeChangeCount(v, memCnt, "rows deleted"); } delete_from_cleanup: @@ -119126,7 +120919,7 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete( ** the update-hook is not invoked for rows removed by REPLACE, but the ** pre-update-hook is. 
*/ - if( pTab->pSelect==0 ){ + if( !IsView(pTab) ){ u8 p5 = 0; sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek); sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0)); @@ -119412,6 +121205,18 @@ static void typeofFunc( sqlite3_result_text(context, azType[i], -1, SQLITE_STATIC); } +/* subtype(X) +** +** Return the subtype of X +*/ +static void subtypeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + UNUSED_PARAMETER(argc); + sqlite3_result_int(context, sqlite3_value_subtype(argv[0])); +} /* ** Implementation of the length() function @@ -119573,7 +121378,7 @@ static void instrFunc( } /* -** Implementation of the printf() function. +** Implementation of the printf() (a.k.a. format()) SQL function. */ static void printfFunc( sqlite3_context *context, @@ -119886,9 +121691,9 @@ static void last_insert_rowid( /* ** Implementation of the changes() SQL function. ** -** IMP: R-62073-11209 The changes() SQL function is a wrapper -** around the sqlite3_changes() C/C++ function and hence follows the same -** rules for counting changes. +** IMP: R-32760-32347 The changes() SQL function is a wrapper +** around the sqlite3_changes64() C/C++ function and hence follows the +** same rules for counting changes. */ static void changes( sqlite3_context *context, @@ -119897,12 +121702,12 @@ static void changes( ){ sqlite3 *db = sqlite3_context_db_handle(context); UNUSED_PARAMETER2(NotUsed, NotUsed2); - sqlite3_result_int(context, sqlite3_changes(db)); + sqlite3_result_int64(context, sqlite3_changes64(db)); } /* ** Implementation of the total_changes() SQL function. The return value is -** the same as the sqlite3_total_changes() API function. +** the same as the sqlite3_total_changes64() API function. */ static void total_changes( sqlite3_context *context, @@ -119911,9 +121716,9 @@ static void total_changes( ){ sqlite3 *db = sqlite3_context_db_handle(context); UNUSED_PARAMETER2(NotUsed, NotUsed2); - /* IMP: R-52756-41993 This function is a wrapper around the - ** sqlite3_total_changes() C/C++ interface. */ - sqlite3_result_int(context, sqlite3_total_changes(db)); + /* IMP: R-11217-42568 This function is a wrapper around the + ** sqlite3_total_changes64() C/C++ interface. */ + sqlite3_result_int64(context, sqlite3_total_changes64(db)); } /* @@ -120342,39 +122147,42 @@ static const char hexdigits[] = { }; /* -** Implementation of the QUOTE() function. This function takes a single -** argument. If the argument is numeric, the return value is the same as -** the argument. If the argument is NULL, the return value is the string -** "NULL". Otherwise, the argument is enclosed in single quotes with -** single-quote escapes. +** Append to pStr text that is the SQL literal representation of the +** value contained in pValue. */ -static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - assert( argc==1 ); - UNUSED_PARAMETER(argc); - switch( sqlite3_value_type(argv[0]) ){ +SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ + /* As currently implemented, the string must be initially empty. + ** we might relax this requirement in the future, but that will + ** require enhancements to the implementation. 
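In the hunks above, changes() and total_changes() become thin wrappers over the 64-bit sqlite3_changes64() and sqlite3_total_changes64() interfaces, and printf() is documented as also being reachable as format() (registered further down in the builtin table). A sketch comparing the driver-level row count with the SQL-level counter, under the same assumed setup and a single pooled connection so the per-connection counter is visible:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	db.SetMaxOpenConns(1) // changes() is per-connection state

	if _, err := db.Exec(`CREATE TABLE t (x INTEGER)`); err != nil {
		log.Fatal(err)
	}
	res, err := db.Exec(`INSERT INTO t VALUES (1), (2), (3)`)
	if err != nil {
		log.Fatal(err)
	}
	affected, _ := res.RowsAffected() // the driver reads SQLite's change counter for this

	// changes() reports the same number at the SQL level; format() is the same
	// implementation as printf() under a second name.
	var viaSQL int64
	var msg string
	if err := db.QueryRow(`SELECT changes(), format('%d rows inserted', changes())`).Scan(&viaSQL, &msg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(affected, viaSQL, msg) // 3 3 3 rows inserted
}
```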
*/ + assert( pStr!=0 && pStr->nChar==0 ); + + switch( sqlite3_value_type(pValue) ){ case SQLITE_FLOAT: { double r1, r2; - char zBuf[50]; - r1 = sqlite3_value_double(argv[0]); - sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.15g", r1); - sqlite3AtoF(zBuf, &r2, 20, SQLITE_UTF8); - if( r1!=r2 ){ - sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.20e", r1); + const char *zVal; + r1 = sqlite3_value_double(pValue); + sqlite3_str_appendf(pStr, "%!.15g", r1); + zVal = sqlite3_str_value(pStr); + if( zVal ){ + sqlite3AtoF(zVal, &r2, pStr->nChar, SQLITE_UTF8); + if( r1!=r2 ){ + sqlite3_str_reset(pStr); + sqlite3_str_appendf(pStr, "%!.20e", r1); + } } - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); break; } case SQLITE_INTEGER: { - sqlite3_result_value(context, argv[0]); + sqlite3_str_appendf(pStr, "%lld", sqlite3_value_int64(pValue)); break; } case SQLITE_BLOB: { - char *zText = 0; - char const *zBlob = sqlite3_value_blob(argv[0]); - int nBlob = sqlite3_value_bytes(argv[0]); - assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4); - if( zText ){ + char const *zBlob = sqlite3_value_blob(pValue); + int nBlob = sqlite3_value_bytes(pValue); + assert( zBlob==sqlite3_value_blob(pValue) ); /* No encoding change */ + sqlite3StrAccumEnlarge(pStr, nBlob*2 + 4); + if( pStr->accError==0 ){ + char *zText = pStr->zText; int i; for(i=0; i>4)&0x0F]; @@ -120384,42 +122192,48 @@ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ zText[(nBlob*2)+3] = '\0'; zText[0] = 'X'; zText[1] = '\''; - sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); - sqlite3_free(zText); + pStr->nChar = nBlob*2 + 3; } break; } case SQLITE_TEXT: { - int i,j; - u64 n; - const unsigned char *zArg = sqlite3_value_text(argv[0]); - char *z; - - if( zArg==0 ) return; - for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } - z = contextMalloc(context, ((i64)i)+((i64)n)+3); - if( z ){ - z[0] = '\''; - for(i=0, j=1; zArg[i]; i++){ - z[j++] = zArg[i]; - if( zArg[i]=='\'' ){ - z[j++] = '\''; - } - } - z[j++] = '\''; - z[j] = 0; - sqlite3_result_text(context, z, j, sqlite3_free); - } + const unsigned char *zArg = sqlite3_value_text(pValue); + sqlite3_str_appendf(pStr, "%Q", zArg); break; } default: { - assert( sqlite3_value_type(argv[0])==SQLITE_NULL ); - sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); + assert( sqlite3_value_type(pValue)==SQLITE_NULL ); + sqlite3_str_append(pStr, "NULL", 4); break; } } } +/* +** Implementation of the QUOTE() function. +** +** The quote(X) function returns the text of an SQL literal which is the +** value of its argument suitable for inclusion into an SQL statement. +** Strings are surrounded by single-quotes with escapes on interior quotes +** as needed. BLOBs are encoded as hexadecimal literals. Strings with +** embedded NUL characters cannot be represented as string literals in SQL +** and hence the returned string literal is truncated prior to the first NUL. 
+*/ +static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ + sqlite3_str str; + sqlite3 *db = sqlite3_context_db_handle(context); + assert( argc==1 ); + UNUSED_PARAMETER(argc); + sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); + sqlite3QuoteValue(&str,argv[0]); + sqlite3_result_text(context, sqlite3StrAccumFinish(&str), str.nChar, + SQLITE_DYNAMIC); + if( str.accError!=SQLITE_OK ){ + sqlite3_result_null(context); + sqlite3_result_error_code(context, str.accError); + } +} + /* ** The unicode() function. Return the integer unicode code-point value ** for the first character of the input string. @@ -121030,97 +122844,167 @@ static void minMaxFinalize(sqlite3_context *context){ /* ** group_concat(EXPR, ?SEPARATOR?) +** +** The SEPARATOR goes before the EXPR string. This is tragic. The +** groupConcatInverse() implementation would have been easier if the +** SEPARATOR were appended after EXPR. And the order is undocumented, +** so we could change it, in theory. But the old behavior has been +** around for so long that we dare not, for fear of breaking something. */ +typedef struct { + StrAccum str; /* The accumulated concatenation */ +#ifndef SQLITE_OMIT_WINDOWFUNC + int nAccum; /* Number of strings presently concatenated */ + int nFirstSepLength; /* Used to detect separator length change */ + /* If pnSepLengths!=0, refs an array of inter-string separator lengths, + ** stored as actually incorporated into presently accumulated result. + ** (Hence, its slots in use number nAccum-1 between method calls.) + ** If pnSepLengths==0, nFirstSepLength is the length used throughout. + */ + int *pnSepLengths; +#endif +} GroupConcatCtx; + static void groupConcatStep( sqlite3_context *context, int argc, sqlite3_value **argv ){ const char *zVal; - StrAccum *pAccum; + GroupConcatCtx *pGCC; const char *zSep; int nVal, nSep; assert( argc==1 || argc==2 ); if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); - - if( pAccum ){ + pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC)); + if( pGCC ){ sqlite3 *db = sqlite3_context_db_handle(context); - int firstTerm = pAccum->mxAlloc==0; - pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; - if( !firstTerm ){ - if( argc==2 ){ - zSep = (char*)sqlite3_value_text(argv[1]); - nSep = sqlite3_value_bytes(argv[1]); - }else{ - zSep = ","; - nSep = 1; + int firstTerm = pGCC->str.mxAlloc==0; + pGCC->str.mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; + if( argc==1 ){ + if( !firstTerm ){ + sqlite3_str_appendchar(&pGCC->str, 1, ','); + } +#ifndef SQLITE_OMIT_WINDOWFUNC + else{ + pGCC->nFirstSepLength = 1; + } +#endif + }else if( !firstTerm ){ + zSep = (char*)sqlite3_value_text(argv[1]); + nSep = sqlite3_value_bytes(argv[1]); + if( zSep ){ + sqlite3_str_append(&pGCC->str, zSep, nSep); + } +#ifndef SQLITE_OMIT_WINDOWFUNC + else{ + nSep = 0; + } + if( nSep != pGCC->nFirstSepLength || pGCC->pnSepLengths != 0 ){ + int *pnsl = pGCC->pnSepLengths; + if( pnsl == 0 ){ + /* First separator length variation seen, start tracking them. 
*/ + pnsl = (int*)sqlite3_malloc64((pGCC->nAccum+1) * sizeof(int)); + if( pnsl!=0 ){ + int i = 0, nA = pGCC->nAccum-1; + while( inFirstSepLength; + } + }else{ + pnsl = (int*)sqlite3_realloc64(pnsl, pGCC->nAccum * sizeof(int)); + } + if( pnsl!=0 ){ + if( ALWAYS(pGCC->nAccum>0) ){ + pnsl[pGCC->nAccum-1] = nSep; + } + pGCC->pnSepLengths = pnsl; + }else{ + sqlite3StrAccumSetError(&pGCC->str, SQLITE_NOMEM); + } } - if( zSep ) sqlite3_str_append(pAccum, zSep, nSep); +#endif } +#ifndef SQLITE_OMIT_WINDOWFUNC + else{ + pGCC->nFirstSepLength = sqlite3_value_bytes(argv[1]); + } + pGCC->nAccum += 1; +#endif zVal = (char*)sqlite3_value_text(argv[0]); nVal = sqlite3_value_bytes(argv[0]); - if( zVal ) sqlite3_str_append(pAccum, zVal, nVal); + if( zVal ) sqlite3_str_append(&pGCC->str, zVal, nVal); } } + #ifndef SQLITE_OMIT_WINDOWFUNC static void groupConcatInverse( sqlite3_context *context, int argc, sqlite3_value **argv ){ - int n; - StrAccum *pAccum; + GroupConcatCtx *pGCC; assert( argc==1 || argc==2 ); + (void)argc; /* Suppress unused parameter warning */ if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); - /* pAccum is always non-NULL since groupConcatStep() will have always + pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC)); + /* pGCC is always non-NULL since groupConcatStep() will have always ** run frist to initialize it */ - if( ALWAYS(pAccum) ){ - n = sqlite3_value_bytes(argv[0]); - if( argc==2 ){ - n += sqlite3_value_bytes(argv[1]); + if( ALWAYS(pGCC) ){ + int nVS; + /* Must call sqlite3_value_text() to convert the argument into text prior + ** to invoking sqlite3_value_bytes(), in case the text encoding is UTF16 */ + (void)sqlite3_value_text(argv[0]); + nVS = sqlite3_value_bytes(argv[0]); + pGCC->nAccum -= 1; + if( pGCC->pnSepLengths!=0 ){ + assert(pGCC->nAccum >= 0); + if( pGCC->nAccum>0 ){ + nVS += *pGCC->pnSepLengths; + memmove(pGCC->pnSepLengths, pGCC->pnSepLengths+1, + (pGCC->nAccum-1)*sizeof(int)); + } }else{ - n++; + /* If removing single accumulated string, harmlessly over-do. 
*/ + nVS += pGCC->nFirstSepLength; } - if( n>=(int)pAccum->nChar ){ - pAccum->nChar = 0; + if( nVS>=(int)pGCC->str.nChar ){ + pGCC->str.nChar = 0; }else{ - pAccum->nChar -= n; - memmove(pAccum->zText, &pAccum->zText[n], pAccum->nChar); + pGCC->str.nChar -= nVS; + memmove(pGCC->str.zText, &pGCC->str.zText[nVS], pGCC->str.nChar); + } + if( pGCC->str.nChar==0 ){ + pGCC->str.mxAlloc = 0; + sqlite3_free(pGCC->pnSepLengths); + pGCC->pnSepLengths = 0; } - if( pAccum->nChar==0 ) pAccum->mxAlloc = 0; } } #else # define groupConcatInverse 0 #endif /* SQLITE_OMIT_WINDOWFUNC */ static void groupConcatFinalize(sqlite3_context *context){ - StrAccum *pAccum; - pAccum = sqlite3_aggregate_context(context, 0); - if( pAccum ){ - if( pAccum->accError==SQLITE_TOOBIG ){ - sqlite3_result_error_toobig(context); - }else if( pAccum->accError==SQLITE_NOMEM ){ - sqlite3_result_error_nomem(context); - }else{ - sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1, - sqlite3_free); - } + GroupConcatCtx *pGCC + = (GroupConcatCtx*)sqlite3_aggregate_context(context, 0); + if( pGCC ){ + sqlite3ResultStrAccum(context, &pGCC->str); +#ifndef SQLITE_OMIT_WINDOWFUNC + sqlite3_free(pGCC->pnSepLengths); +#endif } } #ifndef SQLITE_OMIT_WINDOWFUNC static void groupConcatValue(sqlite3_context *context){ - sqlite3_str *pAccum; - pAccum = (sqlite3_str*)sqlite3_aggregate_context(context, 0); - if( pAccum ){ + GroupConcatCtx *pGCC + = (GroupConcatCtx*)sqlite3_aggregate_context(context, 0); + if( pGCC ){ + StrAccum *pAccum = &pGCC->str; if( pAccum->accError==SQLITE_TOOBIG ){ sqlite3_result_error_toobig(context); }else if( pAccum->accError==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); }else{ const char *zText = sqlite3_str_value(pAccum); - sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); + sqlite3_result_text(context, zText, pAccum->nChar, SQLITE_TRANSIENT); } } } @@ -121184,11 +123068,12 @@ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocas int nExpr; assert( pExpr!=0 ); assert( pExpr->op==TK_FUNCTION ); + assert( ExprUseXList(pExpr) ); if( !pExpr->x.pList ){ return 0; } - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); nExpr = pExpr->x.pList->nExpr; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pDef = sqlite3FindFunction(db, pExpr->u.zToken, nExpr, SQLITE_UTF8, 0); #ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION if( pDef==0 ) return 0; @@ -121212,6 +123097,7 @@ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocas Expr *pEscape = pExpr->x.pList->a[2].pExpr; char *zEscape; if( pEscape->op!=TK_STRING ) return 0; + assert( !ExprHasProperty(pEscape, EP_IntValue) ); zEscape = pEscape->u.zToken; if( zEscape[0]==0 || zEscape[1]!=0 ) return 0; if( zEscape[0]==aWc[0] ) return 0; @@ -121438,12 +123324,12 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ */ static FuncDef aBuiltinFunc[] = { /***** Functions only available with SQLITE_TESTCTRL_INTERNAL_FUNCTIONS *****/ +#if !defined(SQLITE_UNTESTABLE) TEST_FUNC(implies_nonnull_row, 2, INLINEFUNC_implies_nonnull_row, 0), TEST_FUNC(expr_compare, 2, INLINEFUNC_expr_compare, 0), TEST_FUNC(expr_implies_expr, 2, INLINEFUNC_expr_implies_expr, 0), -#ifdef SQLITE_DEBUG - TEST_FUNC(affinity, 1, INLINEFUNC_affinity, 0), -#endif + TEST_FUNC(affinity, 1, INLINEFUNC_affinity, 0), +#endif /* !defined(SQLITE_UNTESTABLE) */ /***** Regular functions *****/ #ifdef SQLITE_SOUNDEX FUNCTION(soundex, 1, 0, 0, soundexFunc ), @@ -121463,8 +123349,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ 
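The group_concat rework above wraps the accumulator in GroupConcatCtx and tracks separator lengths so that groupConcatInverse can peel values back off the front when the function runs as a window aggregate over a moving frame. A sketch of that window form, with made-up data and the same assumed driver:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed cgo driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE words (pos INTEGER PRIMARY KEY, w TEXT)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO words (pos, w) VALUES (1,'a'), (2,'b'), (3,'c'), (4,'d')`); err != nil {
		log.Fatal(err)
	}

	// A sliding two-row frame: as the frame advances, the oldest value is
	// removed again through the aggregate's inverse function.
	rows, err := db.Query(`
		SELECT pos, group_concat(w, '-') OVER (
			ORDER BY pos ROWS BETWEEN 1 PRECEDING AND CURRENT ROW
		) FROM words ORDER BY pos`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var pos int
		var joined string
		if err := rows.Scan(&pos, &joined); err != nil {
			log.Fatal(err)
		}
		fmt.Println(pos, joined) // 1 a, 2 a-b, 3 b-c, 4 c-d
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```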
INLINE_FUNC(likelihood, 2, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY), INLINE_FUNC(likely, 1, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY), #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC - FUNCTION2(sqlite_offset, 1, 0, 0, noopFunc, SQLITE_FUNC_OFFSET| - SQLITE_FUNC_TYPEOF), + {1, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_FUNC_OFFSET|SQLITE_FUNC_TYPEOF, + 0, 0, noopFunc, 0, 0, 0, "sqlite_offset", {0} }, #endif FUNCTION(ltrim, 1, 1, 0, trimFunc ), FUNCTION(ltrim, 2, 1, 0, trimFunc ), @@ -121475,15 +123361,17 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ FUNCTION(min, -1, 0, 1, minmaxFunc ), FUNCTION(min, 0, 0, 1, 0 ), WAGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize, minMaxValue, 0, - SQLITE_FUNC_MINMAX ), + SQLITE_FUNC_MINMAX|SQLITE_FUNC_ANYORDER ), FUNCTION(max, -1, 1, 1, minmaxFunc ), FUNCTION(max, 0, 1, 1, 0 ), WAGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize, minMaxValue, 0, - SQLITE_FUNC_MINMAX ), + SQLITE_FUNC_MINMAX|SQLITE_FUNC_ANYORDER ), FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), + FUNCTION2(subtype, 1, 0, 0, subtypeFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), FUNCTION(instr, 2, 0, 0, instrFunc ), FUNCTION(printf, -1, 0, 0, printfFunc ), + FUNCTION(format, -1, 0, 0, printfFunc ), FUNCTION(unicode, 1, 0, 0, unicodeFunc ), FUNCTION(char, -1, 0, 0, charFunc ), FUNCTION(abs, 1, 0, 0, absFunc ), @@ -121515,9 +123403,10 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ WAGGREGATE(total, 1,0,0, sumStep,totalFinalize,totalFinalize,sumInverse, 0), WAGGREGATE(avg, 1,0,0, sumStep, avgFinalize, avgFinalize, sumInverse, 0), WAGGREGATE(count, 0,0,0, countStep, - countFinalize, countFinalize, countInverse, SQLITE_FUNC_COUNT ), + countFinalize, countFinalize, countInverse, + SQLITE_FUNC_COUNT|SQLITE_FUNC_ANYORDER ), WAGGREGATE(count, 1,0,0, countStep, - countFinalize, countFinalize, countInverse, 0 ), + countFinalize, countFinalize, countInverse, SQLITE_FUNC_ANYORDER ), WAGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep, @@ -121581,6 +123470,7 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ #endif sqlite3WindowFunctions(); sqlite3RegisterDateTimeFunctions(); + sqlite3RegisterJsonFunctions(); sqlite3InsertBuiltinFuncs(aBuiltinFunc, ArraySize(aBuiltinFunc)); #if 0 /* Enable to print out how the built-in functions are hashed */ @@ -121592,6 +123482,7 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ for(p=sqlite3BuiltinFunctions.a[i]; p; p=p->u.pHash){ int n = sqlite3Strlen30(p->zName); int h = p->zName[0] + n; + assert( p->funcFlags & SQLITE_FUNC_BUILTIN ); printf(" %s(%d)", p->zName, h); } printf("\n"); @@ -121819,7 +123710,9 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex( */ if( pParent->iPKey>=0 ){ if( !zKey ) return 0; - if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zName, zKey) ) return 0; + if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zCnName, zKey) ){ + return 0; + } } }else if( paiCol ){ assert( nCol>1 ); @@ -121861,11 +123754,11 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex( /* If the index uses a collation sequence that is different from ** the default collation sequence for the column, this index is ** unusable. Bail out early in this case. 
*/ - zDfltColl = pParent->aCol[iCol].zColl; + zDfltColl = sqlite3ColumnColl(&pParent->aCol[iCol]); if( !zDfltColl ) zDfltColl = sqlite3StrBINARY; if( sqlite3StrICmp(pIdx->azColl[i], zDfltColl) ) break; - zIdxCol = pParent->aCol[iCol].zName; + zIdxCol = pParent->aCol[iCol].zCnName; for(j=0; jaCol[j].zCol, zIdxCol)==0 ){ if( aiCol ) aiCol[i] = pFKey->aCol[j].iFrom; @@ -122089,7 +123982,7 @@ static Expr *exprTableRegister( pCol = &pTab->aCol[iCol]; pExpr->iTable = regBase + sqlite3TableColumnToStorage(pTab,iCol) + 1; pExpr->affExpr = pCol->affinity; - zColl = pCol->zColl; + zColl = sqlite3ColumnColl(pCol); if( zColl==0 ) zColl = db->pDfltColl->zName; pExpr = sqlite3ExprAddCollateString(pParse, pExpr, zColl); }else{ @@ -122112,6 +124005,7 @@ static Expr *exprTableColumn( ){ Expr *pExpr = sqlite3Expr(db, TK_COLUMN, 0); if( pExpr ){ + assert( ExprUseYTab(pExpr) ); pExpr->y.pTab = pTab; pExpr->iTable = iCursor; pExpr->iColumn = iCol; @@ -122198,7 +124092,7 @@ static void fkScanChildren( pLeft = exprTableRegister(pParse, pTab, regData, iCol); iCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom; assert( iCol>=0 ); - zCol = pFKey->pFrom->aCol[iCol].zName; + zCol = pFKey->pFrom->aCol[iCol].zCnName; pRight = sqlite3Expr(db, TK_ID, zCol); pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight); pWhere = sqlite3ExprAnd(pParse, pWhere, pEq); @@ -122233,7 +124127,7 @@ static void fkScanChildren( i16 iCol = pIdx->aiColumn[i]; assert( iCol>=0 ); pLeft = exprTableRegister(pParse, pTab, regData, iCol); - pRight = sqlite3Expr(db, TK_ID, pTab->aCol[iCol].zName); + pRight = sqlite3Expr(db, TK_ID, pTab->aCol[iCol].zCnName); pEq = sqlite3PExpr(pParse, TK_IS, pLeft, pRight); pAll = sqlite3ExprAnd(pParse, pAll, pEq); } @@ -122252,7 +124146,7 @@ static void fkScanChildren( ** clause. For each row found, increment either the deferred or immediate ** foreign key constraint counter. */ if( pParse->nErr==0 ){ - pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0); + pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0, 0); sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr); if( pWInfo ){ sqlite3WhereEnd(pWInfo); @@ -122303,6 +124197,25 @@ static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){ } } +/* +** Clear the apTrigger[] cache of CASCADE triggers for all foreign keys +** in a particular database. This needs to happen when the schema +** changes. +*/ +SQLITE_PRIVATE void sqlite3FkClearTriggerCache(sqlite3 *db, int iDb){ + HashElem *k; + Hash *pHash = &db->aDb[iDb].pSchema->tblHash; + for(k=sqliteHashFirst(pHash); k; k=sqliteHashNext(k)){ + Table *pTab = sqliteHashData(k); + FKey *pFKey; + if( !IsOrdinaryTable(pTab) ) continue; + for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pFKey->pNextFrom){ + fkTriggerDelete(db, pFKey->apTrigger[0]); pFKey->apTrigger[0] = 0; + fkTriggerDelete(db, pFKey->apTrigger[1]); pFKey->apTrigger[1] = 0; + } + } +} + /* ** This function is called to generate code that runs when table pTab is ** being dropped from the database. 
The SrcList passed as the second argument @@ -122322,12 +124235,12 @@ static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){ */ SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){ sqlite3 *db = pParse->db; - if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) ){ + if( (db->flags&SQLITE_ForeignKeys) && IsOrdinaryTable(pTab) ){ int iSkip = 0; Vdbe *v = sqlite3GetVdbe(pParse); assert( v ); /* VDBE has already been allocated */ - assert( pTab->pSelect==0 ); /* Not a view */ + assert( IsOrdinaryTable(pTab) ); if( sqlite3FkReferences(pTab)==0 ){ /* Search for a deferred foreign key constraint for which this table ** is the child table. If one cannot be found, return without @@ -122335,7 +124248,7 @@ SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTa ** the entire DELETE if there are no outstanding deferred constraints ** when this statement is run. */ FKey *p; - for(p=pTab->pFKey; p; p=p->pNextFrom){ + for(p=pTab->u.tab.pFKey; p; p=p->pNextFrom){ if( p->isDeferred || (db->flags & SQLITE_DeferFKs) ) break; } if( !p ) return; @@ -122424,7 +124337,7 @@ static int fkParentIsModified( if( aChange[iKey]>=0 || (iKey==pTab->iPKey && bChngRowid) ){ Column *pCol = &pTab->aCol[iKey]; if( zKey ){ - if( 0==sqlite3StrICmp(pCol->zName, zKey) ) return 1; + if( 0==sqlite3StrICmp(pCol->zCnName, zKey) ) return 1; }else if( pCol->colFlags & COLFLAG_PRIMKEY ){ return 1; } @@ -122491,13 +124404,14 @@ SQLITE_PRIVATE void sqlite3FkCheck( /* If foreign-keys are disabled, this function is a no-op. */ if( (db->flags&SQLITE_ForeignKeys)==0 ) return; + if( !IsOrdinaryTable(pTab) ) return; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); zDb = db->aDb[iDb].zDbSName; /* Loop through all the foreign key constraints for which pTab is the ** child table (the table that the foreign key definition is part of). */ - for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ + for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pFKey->pNextFrom){ Table *pTo; /* Parent table of foreign key pFKey */ Index *pIdx = 0; /* Index on key columns in pTo */ int *aiFree = 0; @@ -122564,7 +124478,7 @@ SQLITE_PRIVATE void sqlite3FkCheck( ** values read from the parent table are NULL. */ if( db->xAuth ){ int rcauth; - char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName; + char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zCnName; rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb); bIgnore = (rcauth==SQLITE_IGNORE); } @@ -122679,10 +124593,10 @@ SQLITE_PRIVATE u32 sqlite3FkOldmask( Table *pTab /* Table being modified */ ){ u32 mask = 0; - if( pParse->db->flags&SQLITE_ForeignKeys ){ + if( pParse->db->flags&SQLITE_ForeignKeys && IsOrdinaryTable(pTab) ){ FKey *p; int i; - for(p=pTab->pFKey; p; p=p->pNextFrom){ + for(p=pTab->u.tab.pFKey; p; p=p->pNextFrom){ for(i=0; inCol; i++) mask |= COLUMN_MASK(p->aCol[i].iFrom); } for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ @@ -122732,19 +124646,19 @@ SQLITE_PRIVATE int sqlite3FkRequired( ){ int eRet = 1; /* Value to return if bHaveFK is true */ int bHaveFK = 0; /* If FK processing is required */ - if( pParse->db->flags&SQLITE_ForeignKeys ){ + if( pParse->db->flags&SQLITE_ForeignKeys && IsOrdinaryTable(pTab) ){ if( !aChange ){ /* A DELETE operation. Foreign key processing is required if the ** table in question is either the child or parent table for any ** foreign key constraint. 
*/ - bHaveFK = (sqlite3FkReferences(pTab) || pTab->pFKey); + bHaveFK = (sqlite3FkReferences(pTab) || pTab->u.tab.pFKey); }else{ /* This is an UPDATE. Foreign key processing is only required if the ** operation modifies one or more child or parent key columns. */ FKey *p; /* Check if any child key columns are being modified. */ - for(p=pTab->pFKey; p; p=p->pNextFrom){ + for(p=pTab->u.tab.pFKey; p; p=p->pNextFrom){ if( fkChildIsModified(pTab, p, aChange, chngRowid) ){ if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) eRet = 2; bHaveFK = 1; @@ -122837,8 +124751,8 @@ static Trigger *fkActionTrigger( assert( pIdx!=0 || (pTab->iPKey>=0 && pTab->iPKeynCol) ); assert( pIdx==0 || pIdx->aiColumn[i]>=0 ); sqlite3TokenInit(&tToCol, - pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zName); - sqlite3TokenInit(&tFromCol, pFKey->pFrom->aCol[iFromCol].zName); + pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zCnName); + sqlite3TokenInit(&tFromCol, pFKey->pFrom->aCol[iFromCol].zCnName); /* Create the expression "OLD.zToCol = zFromCol". It is important ** that the "OLD.zToCol" term is on the LHS of the = operator, so @@ -122883,7 +124797,7 @@ static Trigger *fkActionTrigger( testcase( pCol->colFlags & COLFLAG_STORED ); pDflt = 0; }else{ - pDflt = pCol->pDflt; + pDflt = sqlite3ColumnExpr(pFKey->pFrom, pCol); } if( pDflt ){ pNew = sqlite3ExprDup(db, pDflt, 0); @@ -123020,9 +124934,9 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ FKey *pFKey; /* Iterator variable */ FKey *pNext; /* Copy of pFKey->pNextFrom */ - assert( db==0 || IsVirtual(pTab) - || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); - for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){ + assert( IsOrdinaryTable(pTab) ); + for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pNext){ + assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); /* Remove the FK from the fkeyHash hash table. */ if( !db || db->pnBytesFreed==0 ){ @@ -123102,7 +125016,7 @@ SQLITE_PRIVATE void sqlite3OpenTable( }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk!=0 ); - assert( pPk->tnum==pTab->tnum ); + assert( pPk->tnum==pTab->tnum || CORRUPT_DB ); sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pPk); VdbeComment((v, "%s", pTab->zName)); @@ -123169,28 +125083,68 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ } /* +** Make changes to the evolving bytecode to do affinity transformations +** of values that are about to be gathered into a row for table pTab. +** +** For ordinary (legacy, non-strict) tables: +** ----------------------------------------- +** ** Compute the affinity string for table pTab, if it has not already been ** computed. As an optimization, omit trailing SQLITE_AFF_BLOB affinities. ** -** If the affinity exists (if it is no entirely SQLITE_AFF_BLOB values) and -** if iReg>0 then code an OP_Affinity opcode that will set the affinities -** for register iReg and following. Or if affinities exists and iReg==0, +** If the affinity string is empty (because it was all SQLITE_AFF_BLOB entries +** which were then optimized out) then this routine becomes a no-op. +** +** Otherwise if iReg>0 then code an OP_Affinity opcode that will set the +** affinities for register iReg and following. Or if iReg==0, ** then just set the P4 operand of the previous opcode (which should be ** an OP_MakeRecord) to the affinity string. 
** ** A column affinity string has one character per column: ** -** Character Column affinity -** ------------------------------ -** 'A' BLOB -** 'B' TEXT -** 'C' NUMERIC -** 'D' INTEGER -** 'E' REAL +** Character Column affinity +** --------- --------------- +** 'A' BLOB +** 'B' TEXT +** 'C' NUMERIC +** 'D' INTEGER +** 'E' REAL +** +** For STRICT tables: +** ------------------ +** +** Generate an appropropriate OP_TypeCheck opcode that will verify the +** datatypes against the column definitions in pTab. If iReg==0, that +** means an OP_MakeRecord opcode has already been generated and should be +** the last opcode generated. The new OP_TypeCheck needs to be inserted +** before the OP_MakeRecord. The new OP_TypeCheck should use the same +** register set as the OP_MakeRecord. If iReg>0 then register iReg is +** the first of a series of registers that will form the new record. +** Apply the type checking to that array of registers. */ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ int i, j; - char *zColAff = pTab->zColAff; + char *zColAff; + if( pTab->tabFlags & TF_Strict ){ + if( iReg==0 ){ + /* Move the previous opcode (which should be OP_MakeRecord) forward + ** by one slot and insert a new OP_TypeCheck where the current + ** OP_MakeRecord is found */ + VdbeOp *pPrev; + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + pPrev = sqlite3VdbeGetOp(v, -1); + assert( pPrev!=0 ); + assert( pPrev->opcode==OP_MakeRecord || sqlite3VdbeDb(v)->mallocFailed ); + pPrev->opcode = OP_TypeCheck; + sqlite3VdbeAddOp3(v, OP_MakeRecord, pPrev->p1, pPrev->p2, pPrev->p3); + }else{ + /* Insert an isolated OP_Typecheck */ + sqlite3VdbeAddOp2(v, OP_TypeCheck, iReg, pTab->nNVCol); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + } + return; + } + zColAff = pTab->zColAff; if( zColAff==0 ){ sqlite3 *db = sqlite3VdbeDb(v); zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1); @@ -123200,7 +125154,7 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ } for(i=j=0; inCol; i++){ - assert( pTab->aCol[i].affinity!=0 ); + assert( pTab->aCol[i].affinity!=0 || sqlite3VdbeParser(v)->nErr>0 ); if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ zColAff[j++] = pTab->aCol[i].affinity; } @@ -123216,6 +125170,8 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ if( iReg ){ sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i); }else{ + assert( sqlite3VdbeGetOp(v, -1)->opcode==OP_MakeRecord + || sqlite3VdbeDb(v)->mallocFailed ); sqlite3VdbeChangeP4(v, -1, zColAff, i); } } @@ -123299,24 +125255,30 @@ SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns( ** that appropriate affinity has been applied to the regular columns */ sqlite3TableAffinity(pParse->pVdbe, pTab, iRegStore); - if( (pTab->tabFlags & TF_HasStored)!=0 - && (pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1))->opcode==OP_Affinity - ){ - /* Change the OP_Affinity argument to '@' (NONE) for all stored - ** columns. '@' is the no-op affinity and those columns have not - ** yet been computed. */ - int ii, jj; - char *zP4 = pOp->p4.z; - assert( zP4!=0 ); - assert( pOp->p4type==P4_DYNAMIC ); - for(ii=jj=0; zP4[jj]; ii++){ - if( pTab->aCol[ii].colFlags & COLFLAG_VIRTUAL ){ - continue; - } - if( pTab->aCol[ii].colFlags & COLFLAG_STORED ){ - zP4[jj] = SQLITE_AFF_NONE; + if( (pTab->tabFlags & TF_HasStored)!=0 ){ + pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1); + if( pOp->opcode==OP_Affinity ){ + /* Change the OP_Affinity argument to '@' (NONE) for all stored + ** columns. 
'@' is the no-op affinity and those columns have not + ** yet been computed. */ + int ii, jj; + char *zP4 = pOp->p4.z; + assert( zP4!=0 ); + assert( pOp->p4type==P4_DYNAMIC ); + for(ii=jj=0; zP4[jj]; ii++){ + if( pTab->aCol[ii].colFlags & COLFLAG_VIRTUAL ){ + continue; + } + if( pTab->aCol[ii].colFlags & COLFLAG_STORED ){ + zP4[jj] = SQLITE_AFF_NONE; + } + jj++; } - jj++; + }else if( pOp->opcode==OP_TypeCheck ){ + /* If an OP_TypeCheck was generated because the table is STRICT, + ** then set the P3 operand to indicate that generated columns should + ** not be checked */ + pOp->p3 = 1; } } @@ -123352,7 +125314,7 @@ SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns( int x; pCol->colFlags |= COLFLAG_BUSY; w.eCode = 0; - sqlite3WalkExpr(&w, pCol->pDflt); + sqlite3WalkExpr(&w, sqlite3ColumnExpr(pTab, pCol)); pCol->colFlags &= ~COLFLAG_BUSY; if( w.eCode & COLFLAG_NOTAVAIL ){ pRedo = pCol; @@ -123361,13 +125323,13 @@ SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns( eProgress = 1; assert( pCol->colFlags & COLFLAG_GENERATED ); x = sqlite3TableColumnToStorage(pTab, i) + iRegStore; - sqlite3ExprCodeGeneratedColumn(pParse, pCol, x); + sqlite3ExprCodeGeneratedColumn(pParse, pTab, pCol, x); pCol->colFlags &= ~COLFLAG_NOTAVAIL; } } }while( pRedo && eProgress ); if( pRedo ){ - sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pRedo->zName); + sqlite3ErrorMsg(pParse, "generated column loop on \"%s\"", pRedo->zCnName); } pParse->iSelfTab = 0; } @@ -123726,9 +125688,11 @@ SQLITE_PRIVATE void sqlite3Insert( #endif db = pParse->db; - if( pParse->nErr || db->mallocFailed ){ + assert( db->pParse==pParse ); + if( pParse->nErr ){ goto insert_cleanup; } + assert( db->mallocFailed==0 ); dest.iSDParm = 0; /* Suppress a harmless compiler warning */ /* If the Select object is really just a simple VALUES() list with a @@ -123762,7 +125726,7 @@ SQLITE_PRIVATE void sqlite3Insert( */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask); - isView = pTab->pSelect!=0; + isView = IsView(pTab); #else # define pTrigger 0 # define tmask 0 @@ -123804,7 +125768,11 @@ SQLITE_PRIVATE void sqlite3Insert( ** ** This is the 2nd template. 
*/ - if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){ + if( pColumn==0 + && pSelect!=0 + && pTrigger==0 + && xferOptimization(pParse, pTab, pSelect, onError, iDb) + ){ assert( !pTrigger ); assert( pList==0 ); goto insert_end; @@ -123853,7 +125821,7 @@ SQLITE_PRIVATE void sqlite3Insert( } for(i=0; inId; i++){ for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){ + if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zCnName)==0 ){ pColumn->a[i].idx = j; if( i!=j ) bIdListInOrder = 0; if( j==pTab->iPKey ){ @@ -123863,7 +125831,7 @@ SQLITE_PRIVATE void sqlite3Insert( if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){ sqlite3ErrorMsg(pParse, "cannot INSERT into generated column \"%s\"", - pTab->aCol[j].zName); + pTab->aCol[j].zCnName); goto insert_cleanup; } #endif @@ -123904,7 +125872,9 @@ SQLITE_PRIVATE void sqlite3Insert( dest.nSdst = pTab->nCol; rc = sqlite3Select(pParse, pSelect, &dest); regFromSelect = dest.iSdst; - if( rc || db->mallocFailed || pParse->nErr ) goto insert_cleanup; + assert( db->pParse==pParse ); + if( rc || pParse->nErr ) goto insert_cleanup; + assert( db->mallocFailed==0 ); sqlite3VdbeEndCoroutine(v, regYield); sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ assert( pSelect->pEList ); @@ -124048,7 +126018,7 @@ SQLITE_PRIVATE void sqlite3Insert( pTab->zName); goto insert_cleanup; } - if( pTab->pSelect ){ + if( IsView(pTab) ){ sqlite3ErrorMsg(pParse, "cannot UPSERT a view"); goto insert_cleanup; } @@ -124147,7 +126117,9 @@ SQLITE_PRIVATE void sqlite3Insert( }else if( pColumn==0 ){ /* Hidden columns that are not explicitly named in the INSERT ** get there default value */ - sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore); + sqlite3ExprCodeFactorable(pParse, + sqlite3ColumnExpr(pTab, &pTab->aCol[i]), + iRegStore); continue; } } @@ -124156,13 +126128,17 @@ SQLITE_PRIVATE void sqlite3Insert( if( j>=pColumn->nId ){ /* A column not named in the insert column list gets its ** default value */ - sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore); + sqlite3ExprCodeFactorable(pParse, + sqlite3ColumnExpr(pTab, &pTab->aCol[i]), + iRegStore); continue; } k = j; }else if( nColumn==0 ){ /* This is INSERT INTO ... DEFAULT VALUES. Load the default value. */ - sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore); + sqlite3ExprCodeFactorable(pParse, + sqlite3ColumnExpr(pTab, &pTab->aCol[i]), + iRegStore); continue; }else{ k = i - nHidden; @@ -124387,9 +126363,7 @@ SQLITE_PRIVATE void sqlite3Insert( ** invoke the callback function. 
*/ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); + sqlite3CodeChangeCount(v, regRowCount, "rows inserted"); } insert_cleanup: @@ -124677,7 +126651,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( db = pParse->db; v = pParse->pVdbe; assert( v!=0 ); - assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + assert( !IsView(pTab) ); /* This table is not a VIEW */ nCol = pTab->nCol; /* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for @@ -124728,7 +126702,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } if( onError==OE_Replace ){ if( b2ndPass /* REPLACE becomes ABORT on the 2nd pass */ - || pCol->pDflt==0 /* REPLACE is ABORT if no DEFAULT value */ + || pCol->iDflt==0 /* REPLACE is ABORT if no DEFAULT value */ ){ testcase( pCol->colFlags & COLFLAG_VIRTUAL ); testcase( pCol->colFlags & COLFLAG_STORED ); @@ -124750,7 +126724,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( VdbeCoverage(v); assert( (pCol->colFlags & COLFLAG_GENERATED)==0 ); nSeenReplace++; - sqlite3ExprCodeCopy(pParse, pCol->pDflt, iReg); + sqlite3ExprCodeCopy(pParse, + sqlite3ColumnExpr(pTab, pCol), iReg); sqlite3VdbeJumpHere(v, addr1); break; } @@ -124760,7 +126735,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( case OE_Rollback: case OE_Fail: { char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, - pCol->zName); + pCol->zCnName); sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, iReg); sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC); @@ -125013,6 +126988,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( if( onError==OE_Replace /* IPK rule is REPLACE */ && onError!=overrideError /* Rules for other constraints are different */ && pTab->pIndex /* There exist other constraints */ + && !upsertIpkDelay /* IPK check already deferred by UPSERT */ ){ ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1; VdbeComment((v, "defer IPK REPLACE until last")); @@ -125178,7 +127154,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( testcase( sqlite3TableColumnToStorage(pTab, iField)!=iField ); x = sqlite3TableColumnToStorage(pTab, iField) + regNewData + 1; sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i); - VdbeComment((v, "%s", pTab->aCol[iField].zName)); + VdbeComment((v, "%s", pTab->aCol[iField].zCnName)); } } sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]); @@ -125230,6 +127206,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** This is not possible for ENABLE_PREUPDATE_HOOK builds, as the row ** must be explicitly deleted in order to ensure any pre-update hook ** is invoked. 
*/ + assert( IsOrdinaryTable(pTab) ); #ifndef SQLITE_ENABLE_PREUPDATE_HOOK if( (ix==0 && pIdx->pNext==0) /* Condition 3 */ && pPk==pIdx /* Condition 2 */ @@ -125237,7 +127214,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( && ( 0==(db->flags&SQLITE_RecTriggers) || /* Condition 4 */ 0==sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0)) && ( 0==(db->flags&SQLITE_ForeignKeys) || /* Condition 5 */ - (0==pTab->pFKey && 0==sqlite3FkReferences(pTab))) + (0==pTab->u.tab.pFKey && 0==sqlite3FkReferences(pTab))) ){ sqlite3VdbeResolveLabel(v, addrUniqueOk); continue; @@ -125272,7 +127249,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( x = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[i]); sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i); VdbeComment((v, "%s.%s", pTab->zName, - pTab->aCol[pPk->aiColumn[i]].zName)); + pTab->aCol[pPk->aiColumn[i]].zCnName)); } } if( isUpdate ){ @@ -125336,7 +127313,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( assert( onError==OE_Replace ); nConflictCk = sqlite3VdbeCurrentAddr(v) - addrConflictCk; - assert( nConflictCk>0 ); + assert( nConflictCk>0 || db->mallocFailed ); + testcase( nConflictCk<=0 ); testcase( nConflictCk>1 ); if( regTrigCnt ){ sqlite3MultiWrite(pParse); @@ -125419,6 +127397,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( if( ipkTop ){ sqlite3VdbeGoto(v, ipkTop); VdbeComment((v, "Do IPK REPLACE")); + assert( ipkBottom>0 ); sqlite3VdbeJumpHere(v, ipkBottom); } @@ -125471,7 +127450,7 @@ SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){ if( pTab->pSchema->file_format<2 ) return; for(i=pTab->nCol-1; i>0; i--){ - if( pTab->aCol[i].pDflt!=0 ) break; + if( pTab->aCol[i].iDflt!=0 ) break; if( pTab->aCol[i].colFlags & COLFLAG_PRIMKEY ) break; } sqlite3VdbeChangeP5(v, i+1); @@ -125536,7 +127515,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( v = pParse->pVdbe; assert( v!=0 ); - assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + assert( !IsView(pTab) ); /* This table is not a VIEW */ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ /* All REPLACE indexes are at the end of the list */ assert( pIdx->onError!=OE_Replace @@ -125549,7 +127528,6 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( } pik_flags = (useSeekResult ? OPFLAG_USESEEKRESULT : 0); if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ - assert( pParse->nested==0 ); pik_flags |= OPFLAG_NCHANGE; pik_flags |= (update_flags & OPFLAG_SAVEPOSITION); if( update_flags==0 ){ @@ -125622,8 +127600,9 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices( assert( op==OP_OpenWrite || p5==0 ); if( IsVirtual(pTab) ){ /* This routine is a no-op for virtual tables. Leave the output - ** variables *piDataCur and *piIdxCur uninitialized so that valgrind - ** can detect if they are used by mistake in the caller. */ + ** variables *piDataCur and *piIdxCur set to illegal cursor numbers + ** for improved error detection. */ + *piDataCur = *piIdxCur = -999; return 0; } iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); @@ -125764,18 +127743,13 @@ static int xferOptimization( int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ int regData, regRowid; /* Registers holding data and rowid */ - if( pSelect==0 ){ - return 0; /* Must be of the form INSERT INTO ... SELECT ... */ - } + assert( pSelect!=0 ); if( pParse->pWith || pSelect->pWith ){ /* Do not attempt to process this query if there are an WITH clauses ** attached to it. Proceeding may generate a false "no such table: xxx" ** error if pSelect reads from a CTE named "xxx". 
*/ return 0; } - if( sqlite3TriggerList(pParse, pDest) ){ - return 0; /* tab1 must not have triggers */ - } #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pDest) ){ return 0; /* tab1 must not be a virtual table */ @@ -125838,13 +127812,8 @@ static int xferOptimization( if( HasRowid(pDest)!=HasRowid(pSrc) ){ return 0; /* source and destination must both be WITHOUT ROWID or not */ } -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pSrc) ){ - return 0; /* tab2 must not be a virtual table */ - } -#endif - if( pSrc->pSelect ){ - return 0; /* tab2 may not be a view */ + if( !IsOrdinaryTable(pSrc) ){ + return 0; /* tab2 may not be a view or virtual table */ } if( pDest->nCol!=pSrc->nCol ){ return 0; /* Number of columns must be the same in tab1 and tab2 */ @@ -125852,6 +127821,9 @@ static int xferOptimization( if( pDest->iPKey!=pSrc->iPKey ){ return 0; /* Both tables must have the same INTEGER PRIMARY KEY */ } + if( (pDest->tabFlags & TF_Strict)!=0 && (pSrc->tabFlags & TF_Strict)==0 ){ + return 0; /* Cannot feed from a non-strict into a strict table */ + } for(i=0; inCol; i++){ Column *pDestCol = &pDest->aCol[i]; Column *pSrcCol = &pSrc->aCol[i]; @@ -125888,7 +127860,9 @@ static int xferOptimization( ** This requirement could be relaxed for VIRTUAL columns, I suppose. */ if( (pDestCol->colFlags & COLFLAG_GENERATED)!=0 ){ - if( sqlite3ExprCompare(0, pSrcCol->pDflt, pDestCol->pDflt, -1)!=0 ){ + if( sqlite3ExprCompare(0, + sqlite3ColumnExpr(pSrc, pSrcCol), + sqlite3ColumnExpr(pDest, pDestCol), -1)!=0 ){ testcase( pDestCol->colFlags & COLFLAG_VIRTUAL ); testcase( pDestCol->colFlags & COLFLAG_STORED ); return 0; /* Different generator expressions */ @@ -125898,7 +127872,8 @@ static int xferOptimization( if( pDestCol->affinity!=pSrcCol->affinity ){ return 0; /* Affinity must be the same on all columns */ } - if( sqlite3_stricmp(pDestCol->zColl, pSrcCol->zColl)!=0 ){ + if( sqlite3_stricmp(sqlite3ColumnColl(pDestCol), + sqlite3ColumnColl(pSrcCol))!=0 ){ return 0; /* Collating sequence must be the same on all columns */ } if( pDestCol->notNull && !pSrcCol->notNull ){ @@ -125906,11 +127881,15 @@ static int xferOptimization( } /* Default values for second and subsequent columns need to match. */ if( (pDestCol->colFlags & COLFLAG_GENERATED)==0 && i>0 ){ - assert( pDestCol->pDflt==0 || pDestCol->pDflt->op==TK_SPAN ); - assert( pSrcCol->pDflt==0 || pSrcCol->pDflt->op==TK_SPAN ); - if( (pDestCol->pDflt==0)!=(pSrcCol->pDflt==0) - || (pDestCol->pDflt && strcmp(pDestCol->pDflt->u.zToken, - pSrcCol->pDflt->u.zToken)!=0) + Expr *pDestExpr = sqlite3ColumnExpr(pDest, pDestCol); + Expr *pSrcExpr = sqlite3ColumnExpr(pSrc, pSrcCol); + assert( pDestExpr==0 || pDestExpr->op==TK_SPAN ); + assert( pDestExpr==0 || !ExprHasProperty(pDestExpr, EP_IntValue) ); + assert( pSrcExpr==0 || pSrcExpr->op==TK_SPAN ); + assert( pSrcExpr==0 || !ExprHasProperty(pSrcExpr, EP_IntValue) ); + if( (pDestExpr==0)!=(pSrcExpr==0) + || (pDestExpr!=0 && strcmp(pDestExpr->u.zToken, + pSrcExpr->u.zToken)!=0) ){ return 0; /* Default values must be the same for all columns */ } @@ -125947,7 +127926,8 @@ static int xferOptimization( ** the extra complication to make this rule less restrictive is probably ** not worth the effort. 
Ticket [6284df89debdfa61db8073e062908af0c9b6118e] */ - if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){ + assert( IsOrdinaryTable(pDest) ); + if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->u.tab.pFKey!=0 ){ return 0; } #endif @@ -126625,6 +128605,20 @@ struct sqlite3_api_routines { sqlite3_file *(*database_file_object)(const char*); /* Version 3.34.0 and later */ int (*txn_state)(sqlite3*,const char*); + /* Version 3.36.1 and later */ + sqlite3_int64 (*changes64)(sqlite3*); + sqlite3_int64 (*total_changes64)(sqlite3*); + /* Version 3.37.0 and later */ + int (*autovacuum_pages)(sqlite3*, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, void(*)(void*)); + /* Version 3.38.0 and later */ + int (*error_offset)(sqlite3*); + int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**); + int (*vtab_distinct)(sqlite3_index_info*); + int (*vtab_in)(sqlite3_index_info*,int,int); + int (*vtab_in_first)(sqlite3_value*,sqlite3_value**); + int (*vtab_in_next)(sqlite3_value*,sqlite3_value**); }; /* @@ -126931,6 +128925,18 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_database_file_object sqlite3_api->database_file_object /* Version 3.34.0 and later */ #define sqlite3_txn_state sqlite3_api->txn_state +/* Version 3.36.1 and later */ +#define sqlite3_changes64 sqlite3_api->changes64 +#define sqlite3_total_changes64 sqlite3_api->total_changes64 +/* Version 3.37.0 and later */ +#define sqlite3_autovacuum_pages sqlite3_api->autovacuum_pages +/* Version 3.38.0 and later */ +#define sqlite3_error_offset sqlite3_api->error_offset +#define sqlite3_vtab_rhs_value sqlite3_api->vtab_rhs_value +#define sqlite3_vtab_distinct sqlite3_api->vtab_distinct +#define sqlite3_vtab_in sqlite3_api->vtab_in +#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first +#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -127415,6 +129421,18 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_database_file_object, /* Version 3.34.0 and later */ sqlite3_txn_state, + /* Version 3.36.1 and later */ + sqlite3_changes64, + sqlite3_total_changes64, + /* Version 3.37.0 and later */ + sqlite3_autovacuum_pages, + /* Version 3.38.0 and later */ + sqlite3_error_offset, + sqlite3_vtab_rhs_value, + sqlite3_vtab_distinct, + sqlite3_vtab_in, + sqlite3_vtab_in_first, + sqlite3_vtab_in_next }; /* True if x is the directory separator character @@ -127877,13 +129895,14 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ #define PragTyp_SOFT_HEAP_LIMIT 35 #define PragTyp_SYNCHRONOUS 36 #define PragTyp_TABLE_INFO 37 -#define PragTyp_TEMP_STORE 38 -#define PragTyp_TEMP_STORE_DIRECTORY 39 -#define PragTyp_THREADS 40 -#define PragTyp_WAL_AUTOCHECKPOINT 41 -#define PragTyp_WAL_CHECKPOINT 42 -#define PragTyp_LOCK_STATUS 43 -#define PragTyp_STATS 44 +#define PragTyp_TABLE_LIST 38 +#define PragTyp_TEMP_STORE 39 +#define PragTyp_TEMP_STORE_DIRECTORY 40 +#define PragTyp_THREADS 41 +#define PragTyp_WAL_AUTOCHECKPOINT 42 +#define PragTyp_WAL_CHECKPOINT 43 +#define PragTyp_LOCK_STATUS 44 +#define PragTyp_STATS 45 /* Property flags associated with various pragma. 
*/ #define PragFlg_NeedSchema 0x01 /* Force schema load before running */ @@ -127916,45 +129935,51 @@ static const char *const pragCName[] = { /* 13 */ "pk", /* 14 */ "hidden", /* table_info reuses 8 */ - /* 15 */ "seqno", /* Used by: index_xinfo */ - /* 16 */ "cid", - /* 17 */ "name", - /* 18 */ "desc", - /* 19 */ "coll", - /* 20 */ "key", - /* 21 */ "name", /* Used by: function_list */ - /* 22 */ "builtin", - /* 23 */ "type", - /* 24 */ "enc", - /* 25 */ "narg", - /* 26 */ "flags", - /* 27 */ "tbl", /* Used by: stats */ - /* 28 */ "idx", - /* 29 */ "wdth", - /* 30 */ "hght", - /* 31 */ "flgs", - /* 32 */ "seq", /* Used by: index_list */ - /* 33 */ "name", - /* 34 */ "unique", - /* 35 */ "origin", - /* 36 */ "partial", - /* 37 */ "table", /* Used by: foreign_key_check */ - /* 38 */ "rowid", - /* 39 */ "parent", - /* 40 */ "fkid", - /* index_info reuses 15 */ - /* 41 */ "seq", /* Used by: database_list */ - /* 42 */ "name", - /* 43 */ "file", - /* 44 */ "busy", /* Used by: wal_checkpoint */ - /* 45 */ "log", - /* 46 */ "checkpointed", - /* collation_list reuses 32 */ - /* 47 */ "database", /* Used by: lock_status */ - /* 48 */ "status", - /* 49 */ "cache_size", /* Used by: default_cache_size */ + /* 15 */ "schema", /* Used by: table_list */ + /* 16 */ "name", + /* 17 */ "type", + /* 18 */ "ncol", + /* 19 */ "wr", + /* 20 */ "strict", + /* 21 */ "seqno", /* Used by: index_xinfo */ + /* 22 */ "cid", + /* 23 */ "name", + /* 24 */ "desc", + /* 25 */ "coll", + /* 26 */ "key", + /* 27 */ "name", /* Used by: function_list */ + /* 28 */ "builtin", + /* 29 */ "type", + /* 30 */ "enc", + /* 31 */ "narg", + /* 32 */ "flags", + /* 33 */ "tbl", /* Used by: stats */ + /* 34 */ "idx", + /* 35 */ "wdth", + /* 36 */ "hght", + /* 37 */ "flgs", + /* 38 */ "seq", /* Used by: index_list */ + /* 39 */ "name", + /* 40 */ "unique", + /* 41 */ "origin", + /* 42 */ "partial", + /* 43 */ "table", /* Used by: foreign_key_check */ + /* 44 */ "rowid", + /* 45 */ "parent", + /* 46 */ "fkid", + /* index_info reuses 21 */ + /* 47 */ "seq", /* Used by: database_list */ + /* 48 */ "name", + /* 49 */ "file", + /* 50 */ "busy", /* Used by: wal_checkpoint */ + /* 51 */ "log", + /* 52 */ "checkpointed", + /* collation_list reuses 38 */ + /* 53 */ "database", /* Used by: lock_status */ + /* 54 */ "status", + /* 55 */ "cache_size", /* Used by: default_cache_size */ /* module_list pragma_list reuses 9 */ - /* 50 */ "timeout", /* Used by: busy_timeout */ + /* 56 */ "timeout", /* Used by: busy_timeout */ }; /* Definitions of all built-in pragmas */ @@ -128005,7 +130030,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "busy_timeout", /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 50, 1, + /* ColNames: */ 56, 1, /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) {/* zName: */ "cache_size", @@ -128044,7 +130069,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "collation_list", /* ePragTyp: */ PragTyp_COLLATION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 32, 2, + /* ColNames: */ 38, 2, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) @@ -128079,14 +130104,14 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0, - /* ColNames: */ 41, 3, + /* ColNames: */ 47, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) {/* zName: */ "default_cache_size", /* ePragTyp: */ 
PragTyp_DEFAULT_CACHE_SIZE, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, - /* ColNames: */ 49, 1, + /* ColNames: */ 55, 1, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) @@ -128116,7 +130141,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "foreign_key_check", /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 37, 4, + /* ColNames: */ 43, 4, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) @@ -128159,7 +130184,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "function_list", /* ePragTyp: */ PragTyp_FUNCTION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 21, 6, + /* ColNames: */ 27, 6, /* iArg: */ 0 }, #endif #endif @@ -128188,23 +130213,23 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "index_info", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 15, 3, + /* ColNames: */ 21, 3, /* iArg: */ 0 }, {/* zName: */ "index_list", /* ePragTyp: */ PragTyp_INDEX_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 32, 5, + /* ColNames: */ 38, 5, /* iArg: */ 0 }, {/* zName: */ "index_xinfo", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 15, 6, + /* ColNames: */ 21, 6, /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) {/* zName: */ "integrity_check", /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1|PragFlg_SchemaOpt, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif @@ -128238,7 +130263,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "lock_status", /* ePragTyp: */ PragTyp_LOCK_STATUS, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 47, 2, + /* ColNames: */ 53, 2, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -128312,7 +130337,7 @@ static const PragmaName aPragmaName[] = { #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) {/* zName: */ "quick_check", /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_Result1|PragFlg_SchemaOpt, /* ColNames: */ 0, 0, /* iArg: */ 0 }, #endif @@ -128377,7 +130402,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, - /* ColNames: */ 27, 5, + /* ColNames: */ 33, 5, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -128393,6 +130418,11 @@ static const PragmaName aPragmaName[] = { /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, /* ColNames: */ 8, 6, /* iArg: */ 0 }, + {/* zName: */ "table_list", + /* ePragTyp: */ PragTyp_TABLE_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1, + /* ColNames: */ 15, 6, + /* iArg: */ 0 }, {/* zName: */ "table_xinfo", /* ePragTyp: */ PragTyp_TABLE_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, @@ -128468,7 +130498,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, /* ePragFlg: */ PragFlg_NeedSchema, - /* ColNames: */ 44, 3, + /* ColNames: */ 50, 3, /* iArg: */ 0 }, #endif #if 
!defined(SQLITE_OMIT_FLAG_PRAGMAS) @@ -128479,7 +130509,7 @@ static const PragmaName aPragmaName[] = { /* iArg: */ SQLITE_WriteSchema|SQLITE_NoSchemaError }, #endif }; -/* Number of pragmas: 67 on by default, 77 total. */ +/* Number of pragmas: 68 on by default, 78 total. */ /************** End of pragma.h **********************************************/ /************** Continuing where we left off in pragma.c *********************/ @@ -128921,7 +130951,11 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Locate the pragma in the lookup table */ pPragma = pragmaLocate(zLeft); - if( pPragma==0 ) goto pragma_out; + if( pPragma==0 ){ + /* IMP: R-43042-22504 No error messages are generated if an + ** unknown pragma is issued. */ + goto pragma_out; + } /* Make sure the database schema is loaded if the pragma requires that */ if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){ @@ -129571,6 +131605,14 @@ SQLITE_PRIVATE void sqlite3Pragma( }else{ db->flags &= ~mask; if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; + if( (mask & SQLITE_WriteSchema)!=0 + && sqlite3_stricmp(zRight, "reset")==0 + ){ + /* IMP: R-60817-01178 If the argument is "RESET" then schema + ** writing is disabled (as with "PRAGMA writable_schema=OFF") and, + ** in addition, the schema is reloaded. */ + sqlite3ResetAllSchemasOfConnection(db); + } } /* Many of the flag-pragmas modify the code generated by the SQL @@ -129611,6 +131653,7 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3ViewGetColumnNames(pParse, pTab); for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ int isHidden = 0; + const Expr *pColExpr; if( pCol->colFlags & COLFLAG_NOINSERT ){ if( pPragma->iArg==0 ){ nHidden++; @@ -129631,13 +131674,16 @@ SQLITE_PRIVATE void sqlite3Pragma( }else{ for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){} } - assert( pCol->pDflt==0 || pCol->pDflt->op==TK_SPAN || isHidden>=2 ); + pColExpr = sqlite3ColumnExpr(pTab,pCol); + assert( pColExpr==0 || pColExpr->op==TK_SPAN || isHidden>=2 ); + assert( pColExpr==0 || !ExprHasProperty(pColExpr, EP_IntValue) + || isHidden>=2 ); sqlite3VdbeMultiLoad(v, 1, pPragma->iArg ? "issisii" : "issisi", i-nHidden, - pCol->zName, + pCol->zCnName, sqlite3ColumnType(pCol,""), pCol->notNull ? 1 : 0, - pCol->pDflt && isHidden<2 ? pCol->pDflt->u.zToken : 0, + (isHidden>=2 || pColExpr==0) ? 0 : pColExpr->u.zToken, k, isHidden); } @@ -129645,6 +131691,85 @@ SQLITE_PRIVATE void sqlite3Pragma( } break; + /* + ** PRAGMA table_list + ** + ** Return a single row for each table, virtual table, or view in the + ** entire schema. + ** + ** schema: Name of attached database hold this table + ** name: Name of the table itself + ** type: "table", "view", "virtual", "shadow" + ** ncol: Number of columns + ** wr: True for a WITHOUT ROWID table + ** strict: True for a STRICT table + */ + case PragTyp_TABLE_LIST: { + int ii; + pParse->nMem = 6; + sqlite3CodeVerifyNamedSchema(pParse, zDb); + for(ii=0; iinDb; ii++){ + HashElem *k; + Hash *pHash; + int initNCol; + if( zDb && sqlite3_stricmp(zDb, db->aDb[ii].zDbSName)!=0 ) continue; + + /* Ensure that the Table.nCol field is initialized for all views + ** and virtual tables. Each time we initialize a Table.nCol value + ** for a table, that can potentially disrupt the hash table, so restart + ** the initialization scan. 
+ */ + pHash = &db->aDb[ii].pSchema->tblHash; + initNCol = sqliteHashCount(pHash); + while( initNCol-- ){ + for(k=sqliteHashFirst(pHash); 1; k=sqliteHashNext(k) ){ + Table *pTab; + if( k==0 ){ initNCol = 0; break; } + pTab = sqliteHashData(k); + if( pTab->nCol==0 ){ + char *zSql = sqlite3MPrintf(db, "SELECT*FROM\"%w\"", pTab->zName); + if( zSql ){ + sqlite3_stmt *pDummy = 0; + (void)sqlite3_prepare(db, zSql, -1, &pDummy, 0); + (void)sqlite3_finalize(pDummy); + sqlite3DbFree(db, zSql); + } + if( db->mallocFailed ){ + sqlite3ErrorMsg(db->pParse, "out of memory"); + db->pParse->rc = SQLITE_NOMEM_BKPT; + } + pHash = &db->aDb[ii].pSchema->tblHash; + break; + } + } + } + + for(k=sqliteHashFirst(pHash); k; k=sqliteHashNext(k) ){ + Table *pTab = sqliteHashData(k); + const char *zType; + if( zRight && sqlite3_stricmp(zRight, pTab->zName)!=0 ) continue; + if( IsView(pTab) ){ + zType = "view"; + }else if( IsVirtual(pTab) ){ + zType = "virtual"; + }else if( pTab->tabFlags & TF_Shadow ){ + zType = "shadow"; + }else{ + zType = "table"; + } + sqlite3VdbeMultiLoad(v, 1, "sssiii", + db->aDb[ii].zDbSName, + sqlite3PreferredTableName(pTab->zName), + zType, + pTab->nCol, + (pTab->tabFlags & TF_WithoutRowid)!=0, + (pTab->tabFlags & TF_Strict)!=0 + ); + } + } + } + break; + #ifdef SQLITE_DEBUG case PragTyp_STATS: { Index *pIdx; @@ -129654,7 +131779,7 @@ SQLITE_PRIVATE void sqlite3Pragma( for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); sqlite3VdbeMultiLoad(v, 1, "ssiii", - pTab->zName, + sqlite3PreferredTableName(pTab->zName), 0, pTab->szTabRow, pTab->nRowLogEst, @@ -129704,7 +131829,7 @@ SQLITE_PRIVATE void sqlite3Pragma( for(i=0; iaiColumn[i]; sqlite3VdbeMultiLoad(v, 1, "iisX", i, cnum, - cnum<0 ? 0 : pTab->aCol[cnum].zName); + cnum<0 ? 
0 : pTab->aCol[cnum].zCnName); if( pPragma->iArg ){ sqlite3VdbeMultiLoad(v, 4, "isiX", pIdx->aSortOrder[i], @@ -129773,11 +131898,13 @@ SQLITE_PRIVATE void sqlite3Pragma( pParse->nMem = 6; for(i=0; iu.pHash ){ + assert( p->funcFlags & SQLITE_FUNC_BUILTIN ); pragmaFunclistLine(v, p, 1, showInternFunc); } } for(j=sqliteHashFirst(&db->aFunc); j; j=sqliteHashNext(j)){ p = (FuncDef*)sqliteHashData(j); + assert( (p->funcFlags & SQLITE_FUNC_BUILTIN)==0 ); pragmaFunclistLine(v, p, 0, showInternFunc); } } @@ -129811,8 +131938,8 @@ SQLITE_PRIVATE void sqlite3Pragma( FKey *pFK; Table *pTab; pTab = sqlite3FindTable(db, zRight, zDb); - if( pTab ){ - pFK = pTab->pFKey; + if( pTab && IsOrdinaryTable(pTab) ){ + pFK = pTab->u.tab.pFKey; if( pFK ){ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); int i = 0; @@ -129825,7 +131952,7 @@ SQLITE_PRIVATE void sqlite3Pragma( i, j, pFK->zTo, - pTab->aCol[pFK->aCol[j].iFrom].zName, + pTab->aCol[pFK->aCol[j].iFrom].zCnName, pFK->aCol[j].zCol, actionName(pFK->aAction[1]), /* ON UPDATE */ actionName(pFK->aAction[0]), /* ON DELETE */ @@ -129871,7 +131998,7 @@ SQLITE_PRIVATE void sqlite3Pragma( pTab = (Table*)sqliteHashData(k); k = sqliteHashNext(k); } - if( pTab==0 || pTab->pFKey==0 ) continue; + if( pTab==0 || !IsOrdinaryTable(pTab) || pTab->u.tab.pFKey==0 ) continue; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); zDb = db->aDb[iDb].zDbSName; sqlite3CodeVerifySchema(pParse, iDb); @@ -129879,7 +132006,8 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regResult, pTab->zName); - for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ + assert( IsOrdinaryTable(pTab) ); + for(i=1, pFK=pTab->u.tab.pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); if( pParent==0 ) continue; pIdx = 0; @@ -129901,7 +132029,8 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pFK ) break; if( pParse->nTabnTab = i; addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v); - for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ + assert( IsOrdinaryTable(pTab) ); + for(i=1, pFK=pTab->u.tab.pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); pIdx = 0; aiCols = 0; @@ -129915,6 +132044,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** regRow..regRow+n. If any of the child key values are NULL, this ** row cannot cause an FK violation. Jump directly to addrOk in ** this case. */ + if( regRow+pFK->nCol>pParse->nMem ) pParse->nMem = regRow+pFK->nCol; for(j=0; jnCol; j++){ int iCol = aiCols ? aiCols[j] : pFK->aCol[j].iFrom; sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j); @@ -130101,8 +132231,9 @@ SQLITE_PRIVATE void sqlite3Pragma( int loopTop; int iDataCur, iIdxCur; int r1 = -1; + int bStrict; - if( pTab->tnum<1 ) continue; /* Skip VIEWs or VIRTUAL TABLEs */ + if( !IsOrdinaryTable(pTab) ) continue; if( pObjTab && pObjTab!=pTab ) continue; pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0, @@ -130122,23 +132253,48 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Sanity check on record header decoding */ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3); sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); + VdbeComment((v, "(right-most column)")); } - /* Verify that all NOT NULL columns really are NOT NULL */ + /* Verify that all NOT NULL columns really are NOT NULL. 
At the + ** same time verify the type of the content of STRICT tables */ + bStrict = (pTab->tabFlags & TF_Strict)!=0; for(j=0; jnCol; j++){ char *zErr; - int jmp2; + Column *pCol = pTab->aCol + j; + int doError, jmp2; if( j==pTab->iPKey ) continue; - if( pTab->aCol[j].notNull==0 ) continue; + if( pCol->notNull==0 && !bStrict ) continue; + doError = bStrict ? sqlite3VdbeMakeLabel(pParse) : 0; sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); if( sqlite3VdbeGetOp(v,-1)->opcode==OP_Column ){ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); } - jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); - zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, - pTab->aCol[j].zName); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, jmp2); + if( pCol->notNull ){ + jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, + pCol->zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); + if( bStrict && pCol->eCType!=COLTYPE_ANY ){ + sqlite3VdbeGoto(v, doError); + }else{ + integrityCheckResultRow(v); + } + sqlite3VdbeJumpHere(v, jmp2); + } + if( (pTab->tabFlags & TF_Strict)!=0 + && pCol->eCType!=COLTYPE_ANY + ){ + jmp2 = sqlite3VdbeAddOp3(v, OP_IsNullOrType, 3, 0, + sqlite3StdTypeMap[pCol->eCType-1]); + VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "non-%s value in %s.%s", + sqlite3StdType[pCol->eCType-1], + pTab->zName, pTab->aCol[j].zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); + sqlite3VdbeResolveLabel(v, doError); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, jmp2); + } } /* Verify CHECK constraints */ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ @@ -130672,12 +132828,12 @@ SQLITE_PRIVATE void sqlite3Pragma( case PragTyp_ANALYSIS_LIMIT: { sqlite3_int64 N; if( zRight - && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK + && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK /* IMP: R-40975-20399 */ && N>=0 ){ db->nAnalysisLimit = (int)(N&0x7fffffff); } - returnSingleInt(v, db->nAnalysisLimit); + returnSingleInt(v, db->nAnalysisLimit); /* IMP: R-57594-65522 */ break; } @@ -131079,10 +133235,15 @@ static void corruptSchema( pData->rc = SQLITE_NOMEM_BKPT; }else if( pData->pzErrMsg[0]!=0 ){ /* A error message has already been generated. Do not overwrite it */ - }else if( pData->mInitFlags & (INITFLAG_AlterRename|INITFLAG_AlterDrop) ){ + }else if( pData->mInitFlags & (INITFLAG_AlterMask) ){ + static const char *azAlterType[] = { + "rename", + "drop column", + "add column" + }; *pData->pzErrMsg = sqlite3MPrintf(db, "error in %s %s after %s: %s", azObj[0], azObj[1], - (pData->mInitFlags & INITFLAG_AlterRename) ? 
"rename" : "drop column", + azAlterType[(pData->mInitFlags&INITFLAG_AlterMask)-1], zExtra ); pData->rc = SQLITE_ERROR; @@ -131184,7 +133345,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char } } db->init.orphanTrigger = 0; - db->init.azInit = argv; + db->init.azInit = (const char**)argv; pStmt = 0; TESTONLY(rcp = ) sqlite3Prepare(db, argv[4], -1, 0, 0, &pStmt, 0); rc = db->errCode; @@ -131203,6 +133364,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char } } } + db->init.azInit = sqlite3StdType; /* Any array of string ptrs will do */ sqlite3_finalize(pStmt); }else if( argv[1]==0 || (argv[4]!=0 && argv[4][0]!=0) ){ corruptSchema(pData, argv, 0); @@ -131433,7 +133595,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl sqlite3ResetAllSchemasOfConnection(db); pDb = &db->aDb[iDb]; }else - if( rc==SQLITE_OK || (db->flags&SQLITE_NoSchemaError)){ + if( rc==SQLITE_OK || ((db->flags&SQLITE_NoSchemaError) && rc!=SQLITE_NOMEM)){ /* Hack: If the SQLITE_NoSchemaError flag is set, then consider ** the schema loaded, even if errors (other than OOM) occurred. In ** this situation the current sqlite3_prepare() operation will fail, @@ -131612,8 +133774,14 @@ SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ /* ** Free all memory allocations in the pParse object */ -SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ +SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){ sqlite3 *db = pParse->db; + assert( db!=0 ); + assert( db->pParse==pParse ); + assert( pParse->nested==0 ); +#ifndef SQLITE_OMIT_SHARED_CACHE + sqlite3DbFree(db, pParse->aTableLock); +#endif while( pParse->pCleanup ){ ParseCleanup *pCleanup = pParse->pCleanup; pParse->pCleanup = pCleanup->pNext; @@ -131624,11 +133792,12 @@ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ if( pParse->pConstExpr ){ sqlite3ExprListDelete(db, pParse->pConstExpr); } - if( db ){ - assert( db->lookaside.bDisable >= pParse->disableLookaside ); - db->lookaside.bDisable -= pParse->disableLookaside; - db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue; - } + assert( db->lookaside.bDisable >= pParse->disableLookaside ); + db->lookaside.bDisable -= pParse->disableLookaside; + db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue; + assert( pParse->db->pParse==pParse ); + db->pParse = pParse->pOuterParse; + pParse->db = 0; pParse->disableLookaside = 0; } @@ -131641,7 +133810,7 @@ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ ** cost for this mechansim (an extra malloc), so it should not be used ** for common cleanups that happen on most calls. But for less ** common cleanups, we save a single NULL-pointer comparison in -** sqlite3ParserReset(), which reduces the total CPU cycle count. +** sqlite3ParseObjectReset(), which reduces the total CPU cycle count. ** ** If a memory allocation error occurs, then the cleanup happens immediately. ** When either SQLITE_DEBUG or SQLITE_COVERAGE_TEST are defined, the @@ -131681,6 +133850,33 @@ SQLITE_PRIVATE void *sqlite3ParserAddCleanup( return pPtr; } +/* +** Turn bulk memory into a valid Parse object and link that Parse object +** into database connection db. +** +** Call sqlite3ParseObjectReset() to undo this operation. +** +** Caution: Do not confuse this routine with sqlite3ParseObjectInit() which +** is generated by Lemon. 
+*/ +SQLITE_PRIVATE void sqlite3ParseObjectInit(Parse *pParse, sqlite3 *db){ + memset(PARSE_HDR(pParse), 0, PARSE_HDR_SZ); + memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ); + assert( db->pParse!=pParse ); + pParse->pOuterParse = db->pParse; + db->pParse = pParse; + pParse->db = db; + if( db->mallocFailed ) sqlite3ErrorMsg(pParse, "out of memory"); +} + +/* +** Maximum number of times that we will try again to prepare a statement +** that returns SQLITE_ERROR_RETRY. +*/ +#ifndef SQLITE_MAX_PREPARE_RETRY +# define SQLITE_MAX_PREPARE_RETRY 25 +#endif + /* ** Compile the UTF-8 encoded SQL statement zSql into a statement handle. */ @@ -131693,16 +133889,19 @@ static int sqlite3Prepare( sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ - char *zErrMsg = 0; /* Error message */ int rc = SQLITE_OK; /* Result code */ int i; /* Loop counter */ Parse sParse; /* Parsing context */ - memset(&sParse, 0, PARSE_HDR_SZ); + /* sqlite3ParseObjectInit(&sParse, db); // inlined for performance */ + memset(PARSE_HDR(&sParse), 0, PARSE_HDR_SZ); memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ); + sParse.pOuterParse = db->pParse; + db->pParse = &sParse; + sParse.db = db; sParse.pReprepare = pReprepare; assert( ppStmt && *ppStmt==0 ); - /* assert( !db->mallocFailed ); // not true with SQLITE_USE_ALLOCA */ + if( db->mallocFailed ) sqlite3ErrorMsg(&sParse, "out of memory"); assert( sqlite3_mutex_held(db->mutex) ); /* For a long-term use prepared statement avoid the use of @@ -131755,7 +133954,6 @@ static int sqlite3Prepare( sqlite3VtabUnlockList(db); - sParse.db = db; if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; @@ -131768,14 +133966,14 @@ static int sqlite3Prepare( } zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes); if( zSqlCopy ){ - sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg); + sqlite3RunParser(&sParse, zSqlCopy); sParse.zTail = &zSql[sParse.zTail-zSqlCopy]; sqlite3DbFree(db, zSqlCopy); }else{ sParse.zTail = &zSql[nBytes]; } }else{ - sqlite3RunParser(&sParse, zSql, &zErrMsg); + sqlite3RunParser(&sParse, zSql); } assert( 0==sParse.nQueryLoop ); @@ -131791,7 +133989,7 @@ static int sqlite3Prepare( sParse.checkSchema = 0; } if( sParse.rc!=SQLITE_OK && sParse.rc!=SQLITE_DONE ){ - if( sParse.checkSchema ){ + if( sParse.checkSchema && db->init.busy==0 ){ schemaIsValid(&sParse); } if( sParse.pVdbe ){ @@ -131799,14 +133997,14 @@ static int sqlite3Prepare( } assert( 0==(*ppStmt) ); rc = sParse.rc; - if( zErrMsg ){ - sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg); - sqlite3DbFree(db, zErrMsg); + if( sParse.zErrMsg ){ + sqlite3ErrorWithMsg(db, rc, "%s", sParse.zErrMsg); + sqlite3DbFree(db, sParse.zErrMsg); }else{ sqlite3Error(db, rc); } }else{ - assert( zErrMsg==0 ); + assert( sParse.zErrMsg==0 ); *ppStmt = (sqlite3_stmt*)sParse.pVdbe; rc = SQLITE_OK; sqlite3ErrorClear(db); @@ -131822,7 +134020,7 @@ static int sqlite3Prepare( end_prepare: - sqlite3ParserReset(&sParse); + sqlite3ParseObjectReset(&sParse); return rc; } static int sqlite3LockAndPrepare( @@ -131852,7 +134050,8 @@ static int sqlite3LockAndPrepare( ** reset is considered a permanent error. 
*/ rc = sqlite3Prepare(db, zSql, nBytes, prepFlags, pOld, ppStmt, pzTail); assert( rc==SQLITE_OK || *ppStmt==0 ); - }while( rc==SQLITE_ERROR_RETRY + if( rc==SQLITE_OK || db->mallocFailed ) break; + }while( (rc==SQLITE_ERROR_RETRY && (cnt++)aCol, i=0; inCol; pCol++, i++){ - if( pCol->hName==h && sqlite3StrICmp(pCol->zName, zCol)==0 ) return i; + if( pCol->hName==h && sqlite3StrICmp(pCol->zCnName, zCol)==0 ) return i; } return -1; } @@ -132419,18 +134618,21 @@ static void addWhereTerm( pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight); pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2); + assert( pE2!=0 || pEq==0 ); /* Due to db->mallocFailed test + ** in sqlite3DbMallocRawNN() called from + ** sqlite3PExpr(). */ if( pEq && isOuterJoin ){ ExprSetProperty(pEq, EP_FromJoin); assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(pEq, EP_NoReduce); - pEq->iRightJoinTable = pE2->iTable; + pEq->w.iRightJoinTable = pE2->iTable; } *ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq); } /* ** Set the EP_FromJoin property on all terms of the given expression. -** And set the Expr.iRightJoinTable to iTable for every term in the +** And set the Expr.w.iRightJoinTable to iTable for every term in the ** expression. ** ** The EP_FromJoin property is used on terms of an expression to tell @@ -132440,8 +134642,8 @@ static void addWhereTerm( ** WHERE clause during join processing but we need to remember that they ** originated in the ON or USING clause. ** -** The Expr.iRightJoinTable tells the WHERE clause processing that the -** expression depends on table iRightJoinTable even if that table is not +** The Expr.w.iRightJoinTable tells the WHERE clause processing that the +** expression depends on table w.iRightJoinTable even if that table is not ** explicitly mentioned in the expression. That information is needed ** for cases like this: ** @@ -132459,11 +134661,14 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){ ExprSetProperty(p, EP_FromJoin); assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(p, EP_NoReduce); - p->iRightJoinTable = iTable; - if( p->op==TK_FUNCTION && p->x.pList ){ - int i; - for(i=0; ix.pList->nExpr; i++){ - sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable); + p->w.iRightJoinTable = iTable; + if( p->op==TK_FUNCTION ){ + assert( ExprUseXList(p) ); + if( p->x.pList ){ + int i; + for(i=0; ix.pList->nExpr; i++){ + sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable); + } } } sqlite3SetJoinExpr(p->pLeft, iTable); @@ -132472,7 +134677,7 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){ } /* Undo the work of sqlite3SetJoinExpr(). In the expression p, convert every -** term that is marked with EP_FromJoin and iRightJoinTable==iTable into +** term that is marked with EP_FromJoin and w.iRightJoinTable==iTable into ** an ordinary term that omits the EP_FromJoin mark. ** ** This happens when a LEFT JOIN is simplified into an ordinary JOIN. 
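
The hunks above wire up two user-visible additions in this vendored amalgamation: the table_list pragma (PragTyp_TABLE_LIST) and type enforcement for STRICT tables (OP_TypeCheck). A minimal sketch of exercising both from the host project follows, assuming the vendored driver is github.com/mattn/go-sqlite3 as listed in go.mod and that it is compiled against this amalgamation; the program and the table name are illustrative only and not part of the repository:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/mattn/go-sqlite3" // assumed vendored driver (see go.mod)
    )

    func main() {
        db, err := sql.Open("sqlite3", ":memory:")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // STRICT is new in this amalgamation: column types are enforced at INSERT time.
        if _, err := db.Exec(`CREATE TABLE tiles (z INTEGER, data BLOB) STRICT`); err != nil {
            log.Fatal(err)
        }

        // PRAGMA table_list returns one row per table or view:
        // schema, name, type, ncol, wr (WITHOUT ROWID), strict.
        rows, err := db.Query(`PRAGMA table_list`)
        if err != nil {
            log.Fatal(err)
        }
        defer rows.Close()

        for rows.Next() {
            var schema, name, typ string
            var ncol, wr, strict int
            if err := rows.Scan(&schema, &name, &typ, &ncol, &wr, &strict); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s.%s type=%s ncol=%d without_rowid=%d strict=%d\n",
                schema, name, typ, ncol, wr, strict)
        }
        if err := rows.Err(); err != nil {
            log.Fatal(err)
        }
    }

Run with cgo enabled, this should print one line per table, including strict=1 for the table created above; against the previously vendored amalgamation the STRICT keyword is rejected and the pragma returns no rows.
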
@@ -132480,16 +134685,19 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){ static void unsetJoinExpr(Expr *p, int iTable){ while( p ){ if( ExprHasProperty(p, EP_FromJoin) - && (iTable<0 || p->iRightJoinTable==iTable) ){ + && (iTable<0 || p->w.iRightJoinTable==iTable) ){ ExprClearProperty(p, EP_FromJoin); } if( p->op==TK_COLUMN && p->iTable==iTable ){ ExprClearProperty(p, EP_CanBeNull); } - if( p->op==TK_FUNCTION && p->x.pList ){ - int i; - for(i=0; ix.pList->nExpr; i++){ - unsetJoinExpr(p->x.pList->a[i].pExpr, iTable); + if( p->op==TK_FUNCTION ){ + assert( ExprUseXList(p) ); + if( p->x.pList ){ + int i; + for(i=0; ix.pList->nExpr; i++){ + unsetJoinExpr(p->x.pList->a[i].pExpr, iTable); + } } } unsetJoinExpr(p->pLeft, iTable); @@ -132542,7 +134750,7 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){ int iLeftCol; /* Matching column in the left table */ if( IsHiddenColumn(&pRightTab->aCol[j]) ) continue; - zName = pRightTab->aCol[j].zName; + zName = pRightTab->aCol[j].zCnName; if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 1) ){ addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j, isOuter, &p->pWhere); @@ -132945,7 +135153,9 @@ static void fixDistinctOpenEph( int iVal, /* Value returned by codeDistinct() */ int iOpenEphAddr /* Address of OP_OpenEphemeral instruction for iTab */ ){ - if( eTnctType==WHERE_DISTINCT_UNIQUE || eTnctType==WHERE_DISTINCT_ORDERED ){ + if( pParse->nErr==0 + && (eTnctType==WHERE_DISTINCT_UNIQUE || eTnctType==WHERE_DISTINCT_ORDERED) + ){ Vdbe *v = pParse->pVdbe; sqlite3VdbeChangeToNoop(v, iOpenEphAddr); if( sqlite3VdbeGetOp(v, iOpenEphAddr+1)->opcode==OP_Explain ){ @@ -133002,9 +135212,13 @@ static void selectExprDefer( struct ExprList_item *pItem = &pEList->a[i]; if( pItem->u.x.iOrderByCol==0 ){ Expr *pExpr = pItem->pExpr; - Table *pTab = pExpr->y.pTab; - if( pExpr->op==TK_COLUMN && pExpr->iColumn>=0 && pTab && !IsVirtual(pTab) - && (pTab->aCol[pExpr->iColumn].colFlags & COLFLAG_SORTERREF) + Table *pTab; + if( pExpr->op==TK_COLUMN + && pExpr->iColumn>=0 + && ALWAYS( ExprUseYTab(pExpr) ) + && (pTab = pExpr->y.pTab)!=0 + && IsOrdinaryTable(pTab) + && (pTab->aCol[pExpr->iColumn].colFlags & COLFLAG_SORTERREF)!=0 ){ int j; for(j=0; jiTable = pExpr->iTable; + assert( ExprUseYTab(pNew) ); pNew->y.pTab = pExpr->y.pTab; pNew->iColumn = pPk ? pPk->aiColumn[k] : -1; pExtra = sqlite3ExprListAppend(pParse, pExtra, pNew); @@ -133460,7 +135675,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ p->nRef = 1; memset(&p[1], 0, nExtra); }else{ - sqlite3OomFault(db); + return (KeyInfo*)sqlite3OomFault(db); } return p; } @@ -133631,6 +135846,9 @@ static void generateSortTail( iTab = pSort->iECursor; if( eDest==SRT_Output || eDest==SRT_Coroutine || eDest==SRT_Mem ){ + if( eDest==SRT_Mem && p->iOffset ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, pDest->iSdst); + } regRowid = 0; regRow = pDest->iSdst; }else{ @@ -133873,7 +136091,7 @@ static const char *columnTypeImpl( break; } - assert( pTab && pExpr->y.pTab==pTab ); + assert( pTab && ExprUseYTab(pExpr) && pExpr->y.pTab==pTab ); if( pS ){ /* The "table" is actually a sub-select or a view in the FROM clause ** of the SELECT statement. 
Return the declaration type and origin @@ -133907,7 +136125,7 @@ static const char *columnTypeImpl( zType = "INTEGER"; zOrigCol = "rowid"; }else{ - zOrigCol = pTab->aCol[iCol].zName; + zOrigCol = pTab->aCol[iCol].zCnName; zType = sqlite3ColumnType(&pTab->aCol[iCol],0); } zOrigTab = pTab->zName; @@ -133933,9 +136151,11 @@ static const char *columnTypeImpl( ** statement. */ NameContext sNC; - Select *pS = pExpr->x.pSelect; - Expr *p = pS->pEList->a[0].pExpr; - assert( ExprHasProperty(pExpr, EP_xIsSelect) ); + Select *pS; + Expr *p; + assert( ExprUseXSelect(pExpr) ); + pS = pExpr->x.pSelect; + p = pS->pEList->a[0].pExpr; sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; @@ -134064,7 +136284,8 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames( assert( p!=0 ); assert( p->op!=TK_AGG_COLUMN ); /* Agg processing has not run yet */ - assert( p->op!=TK_COLUMN || p->y.pTab!=0 ); /* Covering idx not yet coded */ + assert( p->op!=TK_COLUMN + || (ExprUseYTab(p) && p->y.pTab!=0) ); /* Covering idx not yet coded */ if( pEList->a[i].zEName && pEList->a[i].eEName==ENAME_NAME ){ /* An AS clause always takes first priority */ char *zName = pEList->a[i].zEName; @@ -134079,7 +136300,7 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames( if( iCol<0 ){ zCol = "rowid"; }else{ - zCol = pTab->aCol[iCol].zName; + zCol = pTab->aCol[iCol].zCnName; } if( fullName ){ char *zName = 0; @@ -134160,11 +136381,14 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( pColExpr = pColExpr->pRight; assert( pColExpr!=0 ); } - if( pColExpr->op==TK_COLUMN && (pTab = pColExpr->y.pTab)!=0 ){ + if( pColExpr->op==TK_COLUMN + && ALWAYS( ExprUseYTab(pColExpr) ) + && (pTab = pColExpr->y.pTab)!=0 + ){ /* For columns use the column name name */ int iCol = pColExpr->iColumn; if( iCol<0 ) iCol = pTab->iPKey; - zName = iCol>=0 ? pTab->aCol[iCol].zName : "rowid"; + zName = iCol>=0 ? 
pTab->aCol[iCol].zCnName : "rowid"; }else if( pColExpr->op==TK_ID ){ assert( !ExprHasProperty(pColExpr, EP_IntValue) ); zName = pColExpr->u.zToken; @@ -134192,7 +136416,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt); if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt); } - pCol->zName = zName; + pCol->zCnName = zName; pCol->hName = sqlite3StrIHash(zName); sqlite3ColumnPropertiesFromName(0, pCol); if( zName && sqlite3HashInsert(&ht, zName, pCol)==pCol ){ @@ -134202,7 +136426,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( sqlite3HashClear(&ht); if( db->mallocFailed ){ for(j=0; jpEList->a; for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ const char *zType; - int n, m; + i64 n, m; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; zType = columnType(&sNC, p, 0, 0, 0); @@ -134254,17 +136478,21 @@ SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation( pCol->affinity = sqlite3ExprAffinity(p); if( zType ){ m = sqlite3Strlen30(zType); - n = sqlite3Strlen30(pCol->zName); - pCol->zName = sqlite3DbReallocOrFree(db, pCol->zName, n+m+2); - if( pCol->zName ){ - memcpy(&pCol->zName[n+1], zType, m+1); + n = sqlite3Strlen30(pCol->zCnName); + pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + if( pCol->zCnName ){ + memcpy(&pCol->zCnName[n+1], zType, m+1); pCol->colFlags |= COLFLAG_HASTYPE; + }else{ + testcase( pCol->colFlags & COLFLAG_HASTYPE ); + pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); } } if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff; pColl = sqlite3ExprCollSeq(pParse, p); - if( pColl && pCol->zColl==0 ){ - pCol->zColl = sqlite3DbStrDup(db, pColl->zName); + if( pColl ){ + assert( pTab->pIndex==0 ); + sqlite3ColumnSetColl(db, pCol, pColl->zName); } } pTab->szTabRow = 1; /* Any non-zero value works */ @@ -134428,7 +136656,7 @@ static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){ */ static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){ ExprList *pOrderBy = p->pOrderBy; - int nOrderBy = p->pOrderBy->nExpr; + int nOrderBy = ALWAYS(pOrderBy!=0) ? pOrderBy->nExpr : 0; sqlite3 *db = pParse->db; KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1); if( pRet ){ @@ -134500,7 +136728,7 @@ static void generateWithRecursiveQuery( SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */ int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ - Select *pSetup = p->pPrior; /* The setup query */ + Select *pSetup; /* The setup query */ Select *pFirstRec; /* Left-most recursive term */ int addrTop; /* Top of the loop */ int addrCont, addrBreak; /* CONTINUE and BREAK addresses */ @@ -134584,7 +136812,6 @@ static void generateWithRecursiveQuery( ** iDistinct table. pFirstRec is left pointing to the left-most ** recursive term of the CTE. 
*/ - pFirstRec = p; for(pFirstRec=p; ALWAYS(pFirstRec!=0); pFirstRec=pFirstRec->pPrior){ if( pFirstRec->selFlags & SF_Aggregate ){ sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); @@ -135050,6 +137277,7 @@ static int multiSelect( int nCol; /* Number of columns in result set */ assert( p->pNext==0 ); + assert( p->pEList!=0 ); nCol = p->pEList->nExpr; pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1); if( !pKeyInfo ){ @@ -135084,7 +137312,11 @@ static int multiSelect( multi_select_end: pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; - sqlite3SelectDelete(db, pDelete); + if( pDelete ){ + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3SelectDelete, + pDelete); + } return rc; } #endif /* SQLITE_OMIT_COMPOUND_SELECT */ @@ -135338,6 +137570,8 @@ static int multiSelectOrderBy( ){ int i, j; /* Loop counters */ Select *pPrior; /* Another SELECT immediately to our left */ + Select *pSplit; /* Left-most SELECT in the right-hand group */ + int nSelect; /* Number of SELECT statements in the compound */ Vdbe *v; /* Generate code to this VDBE */ SelectDest destA; /* Destination for coroutine A */ SelectDest destB; /* Destination for coroutine B */ @@ -135383,8 +137617,7 @@ static int multiSelectOrderBy( /* Patch up the ORDER BY clause */ op = p->op; - pPrior = p->pPrior; - assert( pPrior->pOrderBy==0 ); + assert( p->pPrior->pOrderBy==0 ); pOrderBy = p->pOrderBy; assert( pOrderBy ); nOrderBy = pOrderBy->nExpr; @@ -135397,6 +137630,7 @@ static int multiSelectOrderBy( for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){ struct ExprList_item *pItem; for(j=0, pItem=pOrderBy->a; ju.x.iOrderByCol>0 ); if( pItem->u.x.iOrderByCol==i ) break; } @@ -135423,6 +137657,7 @@ static int multiSelectOrderBy( struct ExprList_item *pItem; aPermute[0] = nOrderBy; for(i=1, pItem=pOrderBy->a; i<=nOrderBy; i++, pItem++){ + assert( pItem!=0 ); assert( pItem->u.x.iOrderByCol>0 ); assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr ); aPermute[i] = pItem->u.x.iOrderByCol - 1; @@ -135432,11 +137667,6 @@ static int multiSelectOrderBy( pKeyMerge = 0; } - /* Reattach the ORDER BY clause to the query. - */ - p->pOrderBy = pOrderBy; - pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); - /* Allocate a range of temporary registers and the KeyInfo needed ** for the logic that removes duplicate result rows when the ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL). 
@@ -135461,12 +137691,30 @@ static int multiSelectOrderBy( /* Separate the left and the right query from one another */ - p->pPrior = 0; + nSelect = 1; + if( (op==TK_ALL || op==TK_UNION) + && OptimizationEnabled(db, SQLITE_BalancedMerge) + ){ + for(pSplit=p; pSplit->pPrior!=0 && pSplit->op==op; pSplit=pSplit->pPrior){ + nSelect++; + assert( pSplit->pPrior->pNext==pSplit ); + } + } + if( nSelect<=3 ){ + pSplit = p; + }else{ + pSplit = p; + for(i=2; ipPrior; } + } + pPrior = pSplit->pPrior; + assert( pPrior!=0 ); + pSplit->pPrior = 0; pPrior->pNext = 0; + assert( p->pOrderBy == pOrderBy ); + assert( pOrderBy!=0 || db->mallocFailed ); + pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER"); - if( pPrior->pPrior==0 ){ - sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); - } + sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); /* Compute the limit registers */ computeLimitRegisters(pParse, p, labelEnd); @@ -135617,12 +137865,11 @@ static int multiSelectOrderBy( /* Reassembly the compound query so that it will be freed correctly ** by the calling function */ - if( p->pPrior ){ - sqlite3SelectDelete(db, p->pPrior); + if( pSplit->pPrior ){ + sqlite3SelectDelete(db, pSplit->pPrior); } - p->pPrior = pPrior; - pPrior->pNext = p; - + pSplit->pPrior = pPrior; + pPrior->pNext = pSplit; sqlite3ExprListDelete(db, pPrior->pOrderBy); pPrior->pOrderBy = 0; @@ -135672,9 +137919,9 @@ static Expr *substExpr( ){ if( pExpr==0 ) return 0; if( ExprHasProperty(pExpr, EP_FromJoin) - && pExpr->iRightJoinTable==pSubst->iTable + && pExpr->w.iRightJoinTable==pSubst->iTable ){ - pExpr->iRightJoinTable = pSubst->iNewTable; + pExpr->w.iRightJoinTable = pSubst->iNewTable; } if( pExpr->op==TK_COLUMN && pExpr->iTable==pSubst->iTable @@ -135713,7 +137960,7 @@ static Expr *substExpr( ExprSetProperty(pNew, EP_CanBeNull); } if( ExprHasProperty(pExpr,EP_FromJoin) ){ - sqlite3SetJoinExpr(pNew, pExpr->iRightJoinTable); + sqlite3SetJoinExpr(pNew, pExpr->w.iRightJoinTable); } sqlite3ExprDelete(db, pExpr); pExpr = pNew; @@ -135735,7 +137982,7 @@ static Expr *substExpr( } pExpr->pLeft = substExpr(pSubst, pExpr->pLeft); pExpr->pRight = substExpr(pSubst, pExpr->pRight); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ substSelect(pSubst, pExpr->x.pSelect, 1); }else{ substExprList(pSubst, pExpr->x.pList); @@ -135826,10 +138073,10 @@ static void recomputeColumnsUsed( ** new cursor number assigned, set an entry in the aCsrMap[] array ** to map the old cursor number to the new: ** -** aCsrMap[iOld] = iNew; +** aCsrMap[iOld+1] = iNew; ** ** The array is guaranteed by the caller to be large enough for all -** existing cursor numbers in pSrc. +** existing cursor numbers in pSrc. aCsrMap[0] is the array size. ** ** If pSrc contains any sub-selects, call this routine recursively ** on the FROM clause of each such sub-select, with iExcept set to -1. 
@@ -135845,10 +138092,11 @@ static void srclistRenumberCursors( for(i=0, pItem=pSrc->a; inSrc; i++, pItem++){ if( i!=iExcept ){ Select *p; - if( !pItem->fg.isRecursive || aCsrMap[pItem->iCursor]==0 ){ - aCsrMap[pItem->iCursor] = pParse->nTab++; + assert( pItem->iCursor < aCsrMap[0] ); + if( !pItem->fg.isRecursive || aCsrMap[pItem->iCursor+1]==0 ){ + aCsrMap[pItem->iCursor+1] = pParse->nTab++; } - pItem->iCursor = aCsrMap[pItem->iCursor]; + pItem->iCursor = aCsrMap[pItem->iCursor+1]; for(p=pItem->pSelect; p; p=p->pPrior){ srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); } @@ -135856,18 +138104,28 @@ static void srclistRenumberCursors( } } +/* +** *piCursor is a cursor number. Change it if it needs to be mapped. +*/ +static void renumberCursorDoMapping(Walker *pWalker, int *piCursor){ + int *aCsrMap = pWalker->u.aiCol; + int iCsr = *piCursor; + if( iCsr < aCsrMap[0] && aCsrMap[iCsr+1]>0 ){ + *piCursor = aCsrMap[iCsr+1]; + } +} + /* ** Expression walker callback used by renumberCursors() to update ** Expr objects to match newly assigned cursor numbers. */ static int renumberCursorsCb(Walker *pWalker, Expr *pExpr){ - int *aCsrMap = pWalker->u.aiCol; int op = pExpr->op; - if( (op==TK_COLUMN || op==TK_IF_NULL_ROW) && aCsrMap[pExpr->iTable] ){ - pExpr->iTable = aCsrMap[pExpr->iTable]; + if( op==TK_COLUMN || op==TK_IF_NULL_ROW ){ + renumberCursorDoMapping(pWalker, &pExpr->iTable); } - if( ExprHasProperty(pExpr, EP_FromJoin) && aCsrMap[pExpr->iRightJoinTable] ){ - pExpr->iRightJoinTable = aCsrMap[pExpr->iRightJoinTable]; + if( ExprHasProperty(pExpr, EP_FromJoin) ){ + renumberCursorDoMapping(pWalker, &pExpr->w.iRightJoinTable); } return WRC_Continue; } @@ -136211,7 +138469,8 @@ static int flattenSubquery( if( pSrc->nSrc>1 ){ if( pParse->nSelect>500 ) return 0; - aCsrMap = sqlite3DbMallocZero(db, pParse->nTab*sizeof(int)); + aCsrMap = sqlite3DbMallocZero(db, ((i64)pParse->nTab+1)*sizeof(int)); + if( aCsrMap ) aCsrMap[0] = pParse->nTab; } } @@ -136834,8 +139093,7 @@ static int pushDownWhereTerms( Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ - int iCursor, /* Cursor number of the subquery */ - int isLeftJoin /* True if pSubq is the right term of a LEFT JOIN */ + SrcItem *pSrc /* The subquery term of the outer FROM clause */ ){ Expr *pNew; int nChng = 0; @@ -136870,20 +139128,25 @@ static int pushDownWhereTerms( return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, - iCursor, isLeftJoin); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrc); pWhere = pWhere->pLeft; } + +#if 0 /* Legacy code. 
Checks now done by sqlite3ExprIsTableConstraint() */ if( isLeftJoin && (ExprHasProperty(pWhere,EP_FromJoin)==0 - || pWhere->iRightJoinTable!=iCursor) + || pWhere->w.iRightJoinTable!=iCursor) ){ return 0; /* restriction (4) */ } - if( ExprHasProperty(pWhere,EP_FromJoin) && pWhere->iRightJoinTable!=iCursor ){ + if( ExprHasProperty(pWhere,EP_FromJoin) + && pWhere->w.iRightJoinTable!=iCursor + ){ return 0; /* restriction (5) */ } - if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ +#endif + + if( sqlite3ExprIsTableConstraint(pWhere, pSrc) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -136891,8 +139154,8 @@ static int pushDownWhereTerms( pNew = sqlite3ExprDup(pParse->db, pWhere, 0); unsetJoinExpr(pNew, -1); x.pParse = pParse; - x.iTable = iCursor; - x.iNewTable = iCursor; + x.iTable = pSrc->iCursor; + x.iNewTable = pSrc->iCursor; x.isLeftJoin = 0; x.pEList = pSubq->pEList; pNew = substExpr(&x, pNew); @@ -136934,7 +139197,7 @@ static int pushDownWhereTerms( */ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ int eRet = WHERE_ORDERBY_NORMAL; /* Return value */ - ExprList *pEList = pFunc->x.pList; /* Arguments to agg function */ + ExprList *pEList; /* Arguments to agg function */ const char *zFunc; /* Name of aggregate function pFunc */ ExprList *pOrderBy; u8 sortFlags = 0; @@ -136942,6 +139205,8 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ assert( *ppMinMax==0 ); assert( pFunc->op==TK_AGG_FUNCTION ); assert( !IsWindowFunc(pFunc) ); + assert( ExprUseXList(pFunc) ); + pEList = pFunc->x.pList; if( pEList==0 || pEList->nExpr!=1 || ExprHasProperty(pFunc, EP_WinFunc) @@ -136949,6 +139214,7 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ ){ return eRet; } + assert( !ExprHasProperty(pFunc, EP_IntValue) ); zFunc = pFunc->u.zToken; if( sqlite3StrICmp(zFunc, "min")==0 ){ eRet = WHERE_ORDERBY_MIN; @@ -136976,7 +139242,13 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ ** ** where table is a database table, not a sub-select or view. If the query ** does match this pattern, then a pointer to the Table object representing -** is returned. Otherwise, 0 is returned. +** is returned. Otherwise, NULL is returned. +** +** This routine checks to see if it is safe to use the count optimization. +** A correct answer is still obtained (though perhaps more slowly) if +** this routine returns NULL when it could have returned a table pointer. +** But returning the pointer when NULL should have been returned can +** result in incorrect answers and/or crashes. So, when in doubt, return NULL. 
*/ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ Table *pTab; @@ -136984,19 +139256,26 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ assert( !p->pGroupBy ); - if( p->pWhere || p->pEList->nExpr!=1 - || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect + if( p->pWhere + || p->pEList->nExpr!=1 + || p->pSrc->nSrc!=1 + || p->pSrc->a[0].pSelect + || pAggInfo->nFunc!=1 ){ return 0; } pTab = p->pSrc->a[0].pTab; + assert( pTab!=0 ); + assert( !IsView(pTab) ); + if( !IsOrdinaryTable(pTab) ) return 0; pExpr = p->pEList->a[0].pExpr; - assert( pTab && !pTab->pSelect && pExpr ); - - if( IsVirtual(pTab) ) return 0; + assert( pExpr!=0 ); if( pExpr->op!=TK_AGG_FUNCTION ) return 0; - if( NEVER(pAggInfo->nFunc==0) ) return 0; + if( pExpr->pAggInfo!=pAggInfo ) return 0; if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0; + assert( pAggInfo->aFunc[0].pFExpr==pExpr ); + testcase( ExprHasProperty(pExpr, EP_Distinct) ); + testcase( ExprHasProperty(pExpr, EP_WinFunc) ); if( ExprHasProperty(pExpr, EP_Distinct|EP_WinFunc) ) return 0; return pTab; @@ -137025,6 +139304,7 @@ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ pParse->checkSchema = 1; return SQLITE_ERROR; } + assert( pFrom->fg.isCte==0 ); pFrom->u2.pIBIndex = pIdx; return SQLITE_OK; } @@ -137282,6 +139562,10 @@ static int resolveFromTermToCte( if( db->mallocFailed ) return 2; pFrom->pSelect->selFlags |= SF_CopyCte; assert( pFrom->pSelect ); + if( pFrom->fg.isIndexedBy ){ + sqlite3ErrorMsg(pParse, "no such index: \"%s\"", pFrom->u1.zIndexedBy); + return 2; + } pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; @@ -137536,30 +139820,31 @@ static int selectExpander(Walker *pWalker, Select *p){ return WRC_Abort; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) - if( IsVirtual(pTab) || pTab->pSelect ){ + if( !IsOrdinaryTable(pTab) ){ i16 nCol; u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; assert( pFrom->pSelect==0 ); - if( pTab->pSelect - && (db->flags & SQLITE_EnableView)==0 - && pTab->pSchema!=db->aDb[1].pSchema - ){ - sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", - pTab->zName); + if( IsView(pTab) ){ + if( (db->flags & SQLITE_EnableView)==0 + && pTab->pSchema!=db->aDb[1].pSchema + ){ + sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", + pTab->zName); + } + pFrom->pSelect = sqlite3SelectDup(db, pTab->u.view.pSelect, 0); } #ifndef SQLITE_OMIT_VIRTUALTABLE - assert( SQLITE_VTABRISK_Normal==1 && SQLITE_VTABRISK_High==2 ); - if( IsVirtual(pTab) + else if( ALWAYS(IsVirtual(pTab)) && pFrom->fg.fromDDL - && ALWAYS(pTab->pVTable!=0) - && pTab->pVTable->eVtabRisk > ((db->flags & SQLITE_TrustedSchema)!=0) + && ALWAYS(pTab->u.vtab.p!=0) + && pTab->u.vtab.p->eVtabRisk > ((db->flags & SQLITE_TrustedSchema)!=0) ){ sqlite3ErrorMsg(pParse, "unsafe use of virtual table \"%s\"", pTab->zName); } + assert( SQLITE_VTABRISK_Normal==1 && SQLITE_VTABRISK_High==2 ); #endif - pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0); nCol = pTab->nCol; pTab->nCol = -1; pWalker->eCode = 1; /* Turn on Select.selId renumbering */ @@ -137578,7 +139863,8 @@ static int selectExpander(Walker *pWalker, Select *p){ /* Process NATURAL keywords, and ON and USING clauses of joins. 
*/ - if( pParse->nErr || db->mallocFailed || sqliteProcessJoin(pParse, p) ){ + assert( db->mallocFailed==0 || pParse->nErr!=0 ); + if( pParse->nErr || sqliteProcessJoin(pParse, p) ){ return WRC_Abort; } @@ -137659,7 +139945,7 @@ static int selectExpander(Walker *pWalker, Select *p){ zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*"; } for(j=0; jnCol; j++){ - char *zName = pTab->aCol[j].zName; + char *zName = pTab->aCol[j].zCnName; char *zColname; /* The computed column name */ char *zToFree; /* Malloced string that needs to be freed */ Token sColname; /* Computed column name as a token */ @@ -137875,12 +140161,13 @@ SQLITE_PRIVATE void sqlite3SelectPrep( NameContext *pOuterNC /* Name context for container */ ){ assert( p!=0 || pParse->db->mallocFailed ); + assert( pParse->db->pParse==pParse ); if( pParse->db->mallocFailed ) return; if( p->selFlags & SF_HasTypeInfo ) return; sqlite3SelectExpand(pParse, p); - if( pParse->nErr || pParse->db->mallocFailed ) return; + if( pParse->nErr ) return; sqlite3ResolveSelectNames(pParse, p, pOuterNC); - if( pParse->nErr || pParse->db->mallocFailed ) return; + if( pParse->nErr ) return; sqlite3SelectAddTypeInfo(pParse, p); } @@ -137897,8 +140184,10 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ int i; struct AggInfo_func *pFunc; int nReg = pAggInfo->nFunc + pAggInfo->nColumn; + assert( pParse->db->pParse==pParse ); + assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); if( nReg==0 ) return; - if( pParse->nErr || pParse->db->mallocFailed ) return; + if( pParse->nErr ) return; #ifdef SQLITE_DEBUG /* Verify that all AggInfo registers are within the range specified by ** AggInfo.mnReg..AggInfo.mxReg */ @@ -137916,7 +140205,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ for(pFunc=pAggInfo->aFunc, i=0; inFunc; i++, pFunc++){ if( pFunc->iDistinct>=0 ){ Expr *pE = pFunc->pFExpr; - assert( !ExprHasProperty(pE, EP_xIsSelect) ); + assert( ExprUseXList(pE) ); if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){ sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one " "argument"); @@ -137941,8 +140230,9 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ int i; struct AggInfo_func *pF; for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ - ExprList *pList = pF->pFExpr->x.pList; - assert( !ExprHasProperty(pF->pFExpr, EP_xIsSelect) ); + ExprList *pList; + assert( ExprUseXList(pF->pFExpr) ); + pList = pF->pFExpr->x.pList; sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); } @@ -137976,9 +140266,10 @@ static void updateAccumulator( int nArg; int addrNext = 0; int regAgg; - ExprList *pList = pF->pFExpr->x.pList; - assert( !ExprHasProperty(pF->pFExpr, EP_xIsSelect) ); + ExprList *pList; + assert( ExprUseXList(pF->pFExpr) ); assert( !IsWindowFunc(pF->pFExpr) ); + pList = pF->pFExpr->x.pList; if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){ Expr *pFilter = pF->pFExpr->y.pWin->pFilter; if( pAggInfo->nAccumulator @@ -138091,8 +140382,16 @@ static void explainSimpleCount( static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op!=TK_AND ){ Select *pS = pWalker->u.pSelect; + /* This routine is called before the HAVING clause of the current + ** SELECT is analyzed for aggregates. So if pExpr->pAggInfo is set + ** here, it indicates that the expression is a correlated reference to a + ** column from an outer aggregate query, or an aggregate function that + ** belongs to an outer query. 
Do not move the expression to the WHERE + ** clause in this obscure case, as doing so may corrupt the outer Select + ** statements AggInfo structure. */ if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) && ExprAlwaysFalse(pExpr)==0 + && pExpr->pAggInfo==0 ){ sqlite3 *db = pWalker->pParse->db; Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1"); @@ -138216,7 +140515,9 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( p->pGroupBy ) return 0; pExpr = p->pEList->a[0].pExpr; if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */ + assert( ExprUseUToken(pExpr) ); if( sqlite3_stricmp(pExpr->u.zToken,"count") ) return 0; /* Is count() */ + assert( ExprUseXList(pExpr) ); if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ pSub = p->pSrc->a[0].pSelect; @@ -138309,10 +140610,12 @@ SQLITE_PRIVATE int sqlite3Select( u8 minMaxFlag; /* Flag for min/max queries */ db = pParse->db; + assert( pParse==db->pParse ); v = sqlite3GetVdbe(pParse); - if( p==0 || db->mallocFailed || pParse->nErr ){ + if( p==0 || pParse->nErr ){ return 1; } + assert( db->mallocFailed==0 ); if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); @@ -138347,9 +140650,10 @@ SQLITE_PRIVATE int sqlite3Select( p->selFlags |= SF_NoopOrderBy; } sqlite3SelectPrep(pParse, p, 0); - if( pParse->nErr || db->mallocFailed ){ + if( pParse->nErr ){ goto select_end; } + assert( db->mallocFailed==0 ); assert( p->pEList!=0 ); #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x104 ){ @@ -138358,11 +140662,16 @@ SQLITE_PRIVATE int sqlite3Select( } #endif - /* If the SF_UpdateFrom flag is set, then this function is being called + /* If the SF_UFSrcCheck flag is set, then this function is being called ** as part of populating the temp table for an UPDATE...FROM statement. ** In this case, it is an error if the target object (pSrc->a[0]) name - ** or alias is duplicated within FROM clause (pSrc->a[1..n]). */ - if( p->selFlags & SF_UpdateFrom ){ + ** or alias is duplicated within FROM clause (pSrc->a[1..n]). + ** + ** Postgres disallows this case too. The reason is that some other + ** systems handle this case differently, and not all the same way, + ** which is just confusing. To avoid this, we follow PG's lead and + ** disallow it altogether. */ + if( p->selFlags & SF_UFSrcCheck ){ SrcItem *p0 = &p->pSrc->a[0]; for(i=1; ipSrc->nSrc; i++){ SrcItem *p1 = &p->pSrc->a[i]; @@ -138374,6 +140683,12 @@ SQLITE_PRIVATE int sqlite3Select( goto select_end; } } + + /* Clear the SF_UFSrcCheck flag. The check has already been performed, + ** and leaving this flag set can cause errors if a compound sub-query + ** in p->pSrc is flattened into this query and this function called + ** again as part of compound SELECT processing. */ + p->selFlags &= ~SF_UFSrcCheck; } if( pDest->eDest==SRT_Output ){ @@ -138382,7 +140697,7 @@ SQLITE_PRIVATE int sqlite3Select( #ifndef SQLITE_OMIT_WINDOWFUNC if( sqlite3WindowRewrite(pParse, p) ){ - assert( db->mallocFailed || pParse->nErr>0 ); + assert( pParse->nErr ); goto select_end; } #if SELECTTRACE_ENABLED @@ -138445,6 +140760,39 @@ SQLITE_PRIVATE int sqlite3Select( if( (pSub->selFlags & SF_Aggregate)!=0 ) continue; assert( pSub->pGroupBy==0 ); + /* If a FROM-clause subquery has an ORDER BY clause that is not + ** really doing anything, then delete it now so that it does not + ** interfere with query flattening. 
See the discussion at + ** https://sqlite.org/forum/forumpost/2d76f2bcf65d256a + ** + ** Beware of these cases where the ORDER BY clause may not be safely + ** omitted: + ** + ** (1) There is also a LIMIT clause + ** (2) The subquery was added to help with window-function + ** processing + ** (3) The subquery is in the FROM clause of an UPDATE + ** (4) The outer query uses an aggregate function other than + ** the built-in count(), min(), or max(). + ** (5) The ORDER BY isn't going to accomplish anything because + ** one of: + ** (a) The outer query has a different ORDER BY clause + ** (b) The subquery is part of a join + ** See forum post 062d576715d277c8 + */ + if( pSub->pOrderBy!=0 + && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */ + && pSub->pLimit==0 /* Condition (1) */ + && (pSub->selFlags & SF_OrderByReqd)==0 /* Condition (2) */ + && (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ + && OptimizationEnabled(db, SQLITE_OmitOrderBy) + ){ + SELECTTRACE(0x100,pParse,p, + ("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1)); + sqlite3ExprListDelete(db, pSub->pOrderBy); + pSub->pOrderBy = 0; + } + /* If the outer query contains a "complex" result set (that is, ** if the result set of the outer query uses functions or subqueries) ** and if the subquery contains an ORDER BY clause and if @@ -138587,9 +140935,9 @@ SQLITE_PRIVATE int sqlite3Select( ** inside the subquery. This can help the subquery to run more efficiently. */ if( OptimizationEnabled(db, SQLITE_PushDown) - && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) - && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor, - (pItem->fg.jointype & JT_OUTER)!=0) + && (pItem->fg.isCte==0 + || (pItem->u2.pCteUse->eM10d!=M10d_Yes && pItem->u2.pCteUse->nUse<2)) + && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem) ){ #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ @@ -138648,6 +140996,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); if( pItem->iCursor!=pCteUse->iCur ){ sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pCteUse->iCur); + VdbeComment((v, "%!S", pItem)); } pSub->nSelectRow = pCteUse->nRowEst; }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){ @@ -138823,7 +141172,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Begin the database scan. 
*/ SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, - p->pEList, wctrlFlags, p->nSelectRow); + p->pEList, p, wctrlFlags, p->nSelectRow); if( pWInfo==0 ) goto select_end; if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); @@ -138985,7 +141334,7 @@ SQLITE_PRIVATE int sqlite3Select( } for(i=0; inFunc; i++){ Expr *pExpr = pAggInfo->aFunc[i].pFExpr; - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( ExprUseXList(pExpr) ); sNC.ncFlags |= NC_InAggFunc; sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -139040,7 +141389,9 @@ SQLITE_PRIVATE int sqlite3Select( if( pAggInfo->nFunc==1 && pAggInfo->aFunc[0].iDistinct>=0 - && pAggInfo->aFunc[0].pFExpr->x.pList + && ALWAYS(pAggInfo->aFunc[0].pFExpr!=0) + && ALWAYS(ExprUseXList(pAggInfo->aFunc[0].pFExpr)) + && pAggInfo->aFunc[0].pFExpr->x.pList!=0 ){ Expr *pExpr = pAggInfo->aFunc[0].pFExpr->x.pList->a[0].pExpr; pExpr = sqlite3ExprDup(db, pExpr, 0); @@ -139085,7 +141436,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct, - WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0 + 0, (WHERE_GROUPBY|(orderByGrp ? WHERE_SORTBYGROUP : 0)|distFlag), 0 ); if( pWInfo==0 ){ sqlite3ExprListDelete(db, pDistinct); @@ -139361,6 +141712,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_Integer, 0, regAcc); } }else if( pAggInfo->nFunc==1 && pAggInfo->aFunc[0].iDistinct>=0 ){ + assert( ExprUseXList(pAggInfo->aFunc[0].pFExpr) ); pDistinct = pAggInfo->aFunc[0].pFExpr->x.pList; distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0; } @@ -139382,7 +141734,7 @@ SQLITE_PRIVATE int sqlite3Select( SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, - pDistinct, minMaxFlag|distFlag, 0); + pDistinct, 0, minMaxFlag|distFlag, 0); if( pWInfo==0 ){ goto select_end; } @@ -139439,7 +141791,7 @@ SQLITE_PRIVATE int sqlite3Select( */ select_end: assert( db->mallocFailed==0 || db->mallocFailed==1 ); - pParse->nErr += db->mallocFailed; + assert( db->mallocFailed==0 || pParse->nErr!=0 ); sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG if( pAggInfo && !db->mallocFailed ){ @@ -139892,12 +142244,12 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( /* INSTEAD of triggers are only for views and views only support INSTEAD ** of triggers. */ - if( pTab->pSelect && tr_tm!=TK_INSTEAD ){ + if( IsView(pTab) && tr_tm!=TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S", (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName->a); goto trigger_orphan_error; } - if( !pTab->pSelect && tr_tm==TK_INSTEAD ){ + if( !IsView(pTab) && tr_tm==TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF" " trigger on table: %S", pTableName->a); goto trigger_orphan_error; @@ -140034,7 +142386,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger( z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); testcase( z==0 ); sqlite3NestedParse(pParse, - "INSERT INTO %Q." DFLT_SCHEMA_TABLE + "INSERT INTO %Q." 
LEGACY_SCHEMA_TABLE " VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", db->aDb[iDb].zDbSName, zName, pTrig->table, z); @@ -140119,6 +142471,7 @@ static TriggerStep *triggerStepAllocate( sqlite3 *db = pParse->db; TriggerStep *pTriggerStep; + if( pParse->nErr ) return 0; pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1); if( pTriggerStep ){ char *z = (char*)&pTriggerStep[1]; @@ -140348,7 +142701,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){ */ if( (v = sqlite3GetVdbe(pParse))!=0 ){ sqlite3NestedParse(pParse, - "DELETE FROM %Q." DFLT_SCHEMA_TABLE " WHERE name=%Q AND type='trigger'", + "DELETE FROM %Q." LEGACY_SCHEMA_TABLE " WHERE name=%Q AND type='trigger'", db->aDb[iDb].zDbSName, pTrigger->zName ); sqlite3ChangeCookie(pParse, iDb); @@ -140550,11 +142903,11 @@ static ExprList *sqlite3ExpandReturning( for(jj=0; jjnCol; jj++){ Expr *pNewExpr; if( IsHiddenColumn(pTab->aCol+jj) ) continue; - pNewExpr = sqlite3Expr(db, TK_ID, pTab->aCol[jj].zName); + pNewExpr = sqlite3Expr(db, TK_ID, pTab->aCol[jj].zCnName); pNew = sqlite3ExprListAppend(pParse, pNew, pNewExpr); if( !db->mallocFailed ){ struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1]; - pItem->zEName = sqlite3DbStrDup(db, pTab->aCol[jj].zName); + pItem->zEName = sqlite3DbStrDup(db, pTab->aCol[jj].zCnName); pItem->eEName = ENAME_NAME; } } @@ -140591,6 +142944,7 @@ static void codeReturningTrigger( assert( v!=0 ); assert( pParse->bReturning ); + assert( db->pParse==pParse ); pReturning = pParse->u1.pReturning; assert( pTrigger == &(pReturning->retTrig) ); memset(&sSelect, 0, sizeof(sSelect)); @@ -140599,13 +142953,15 @@ static void codeReturningTrigger( sSelect.pSrc = &sFrom; sFrom.nSrc = 1; sFrom.a[0].pTab = pTab; + sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); - if( db->mallocFailed==0 && pParse->nErr==0 ){ + if( pParse->nErr==0 ){ + assert( db->mallocFailed==0 ); sqlite3GenerateColumnNames(pParse, &sSelect); } sqlite3ExprListDelete(db, sSelect.pEList); pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab); - if( pNew ){ + if( !db->mallocFailed ){ NameContext sNC; memset(&sNC, 0, sizeof(sNC)); if( pReturning->nRetCol==0 ){ @@ -140617,7 +142973,9 @@ static void codeReturningTrigger( sNC.ncFlags = NC_UBaseReg; pParse->eTriggerOp = pTrigger->op; pParse->pTriggerTab = pTab; - if( sqlite3ResolveExprListNames(&sNC, pNew)==SQLITE_OK ){ + if( sqlite3ResolveExprListNames(&sNC, pNew)==SQLITE_OK + && ALWAYS(!db->mallocFailed) + ){ int i; int nCol = pNew->nExpr; int reg = pParse->nMem+1; @@ -140625,16 +142983,20 @@ static void codeReturningTrigger( pReturning->iRetReg = reg; for(i=0; ia[i].pExpr; + assert( pCol!=0 ); /* Due to !db->mallocFailed ~9 lines above */ sqlite3ExprCodeFactorable(pParse, pCol, reg+i); + if( sqlite3ExprAffinity(pCol)==SQLITE_AFF_REAL ){ + sqlite3VdbeAddOp1(v, OP_RealAffinity, reg+i); + } } sqlite3VdbeAddOp3(v, OP_MakeRecord, reg, i, reg+i); sqlite3VdbeAddOp2(v, OP_NewRowid, pReturning->iRetCur, reg+i+1); sqlite3VdbeAddOp3(v, OP_Insert, pReturning->iRetCur, reg+i, reg+i+1); } - sqlite3ExprListDelete(db, pNew); - pParse->eTriggerOp = 0; - pParse->pTriggerTab = 0; } + sqlite3ExprListDelete(db, pNew); + pParse->eTriggerOp = 0; + pParse->pTriggerTab = 0; } @@ -140776,8 +143138,8 @@ static TriggerPrg *codeRowTrigger( Vdbe *v; /* Temporary VM */ NameContext sNC; /* Name context for sub-vdbe */ SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */ - Parse *pSubParse; /* Parse context for sub-vdbe */ int iEndTrigger = 0; /* Label to jump to 
if WHEN is false */ + Parse sSubParse; /* Parse context for sub-vdbe */ assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); assert( pTop->pVdbe ); @@ -140799,19 +143161,17 @@ static TriggerPrg *codeRowTrigger( /* Allocate and populate a new Parse context to use for coding the ** trigger sub-program. */ - pSubParse = sqlite3StackAllocZero(db, sizeof(Parse)); - if( !pSubParse ) return 0; + sqlite3ParseObjectInit(&sSubParse, db); memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pSubParse; - pSubParse->db = db; - pSubParse->pTriggerTab = pTab; - pSubParse->pToplevel = pTop; - pSubParse->zAuthContext = pTrigger->zName; - pSubParse->eTriggerOp = pTrigger->op; - pSubParse->nQueryLoop = pParse->nQueryLoop; - pSubParse->disableVtab = pParse->disableVtab; - - v = sqlite3GetVdbe(pSubParse); + sNC.pParse = &sSubParse; + sSubParse.pTriggerTab = pTab; + sSubParse.pToplevel = pTop; + sSubParse.zAuthContext = pTrigger->zName; + sSubParse.eTriggerOp = pTrigger->op; + sSubParse.nQueryLoop = pParse->nQueryLoop; + sSubParse.disableVtab = pParse->disableVtab; + + v = sqlite3GetVdbe(&sSubParse); if( v ){ VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)", pTrigger->zName, onErrorText(orconf), @@ -140837,14 +143197,14 @@ static TriggerPrg *codeRowTrigger( if( db->mallocFailed==0 && SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) ){ - iEndTrigger = sqlite3VdbeMakeLabel(pSubParse); - sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); + iEndTrigger = sqlite3VdbeMakeLabel(&sSubParse); + sqlite3ExprIfFalse(&sSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); } sqlite3ExprDelete(db, pWhen); } /* Code the trigger program into the sub-vdbe. */ - codeTriggerProgram(pSubParse, pTrigger->step_list, orconf); + codeTriggerProgram(&sSubParse, pTrigger->step_list, orconf); /* Insert an OP_Halt at the end of the sub-program. */ if( iEndTrigger ){ @@ -140852,23 +143212,24 @@ static TriggerPrg *codeRowTrigger( } sqlite3VdbeAddOp0(v, OP_Halt); VdbeComment((v, "End: %s.%s", pTrigger->zName, onErrorText(orconf))); + transferParseError(pParse, &sSubParse); - transferParseError(pParse, pSubParse); - if( db->mallocFailed==0 && pParse->nErr==0 ){ + if( pParse->nErr==0 ){ + assert( db->mallocFailed==0 ); pProgram->aOp = sqlite3VdbeTakeOpArray(v, &pProgram->nOp, &pTop->nMaxArg); } - pProgram->nMem = pSubParse->nMem; - pProgram->nCsr = pSubParse->nTab; + pProgram->nMem = sSubParse.nMem; + pProgram->nCsr = sSubParse.nTab; pProgram->token = (void *)pTrigger; - pPrg->aColmask[0] = pSubParse->oldmask; - pPrg->aColmask[1] = pSubParse->newmask; + pPrg->aColmask[0] = sSubParse.oldmask; + pPrg->aColmask[1] = sSubParse.newmask; sqlite3VdbeDelete(v); + }else{ + transferParseError(pParse, &sSubParse); } - assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); - sqlite3ParserReset(pSubParse); - sqlite3StackFree(db, pSubParse); - + assert( !sSubParse.pTriggerPrg && !sSubParse.nMaxArg ); + sqlite3ParseObjectReset(&sSubParse); return pPrg; } @@ -140901,6 +143262,7 @@ static TriggerPrg *getRowTrigger( /* If an existing TriggerPrg could not be located, create a new one. */ if( !pPrg ){ pPrg = codeRowTrigger(pParse, pTrigger, pTab, orconf); + pParse->db->errByteOffset = -1; } return pPrg; @@ -140923,7 +143285,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( Vdbe *v = sqlite3GetVdbe(pParse); /* Main VM */ TriggerPrg *pPrg; pPrg = getRowTrigger(pParse, p, pTab, orconf); - assert( pPrg || pParse->nErr || pParse->db->mallocFailed ); + assert( pPrg || pParse->nErr ); /* Code the OP_Program opcode in the parent VDBE. 
P4 of the OP_Program ** is a pointer to the sub-vdbe containing the trigger program. */ @@ -141154,13 +143516,14 @@ static void updateVirtualTable( */ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ assert( pTab!=0 ); - if( !pTab->pSelect ){ + if( !IsView(pTab) ){ sqlite3_value *pValue = 0; u8 enc = ENC(sqlite3VdbeDb(v)); Column *pCol = &pTab->aCol[i]; - VdbeComment((v, "%s.%s", pTab->zName, pCol->zName)); + VdbeComment((v, "%s.%s", pTab->zName, pCol->zCnName)); assert( inCol ); - sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc, + sqlite3ValueFromExpr(sqlite3VdbeDb(v), + sqlite3ColumnExpr(pTab,pCol), enc, pCol->affinity, &pValue); if( pValue ){ sqlite3VdbeAppendP4(v, pValue, P4_MEM); @@ -141330,7 +143693,7 @@ static void updateFromSelect( pList = sqlite3ExprListAppend(pParse, pList, pNew); } eDest = IsVirtual(pTab) ? SRT_Table : SRT_Upfrom; - }else if( pTab->pSelect ){ + }else if( IsView(pTab) ){ for(i=0; inCol; i++){ pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i)); } @@ -141353,8 +143716,9 @@ static void updateFromSelect( } } pSelect = sqlite3SelectNew(pParse, pList, - pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UpdateFrom|SF_IncludeHidden, pLimit2 + pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UFSrcCheck|SF_IncludeHidden, pLimit2 ); + if( pSelect ) pSelect->selFlags |= SF_OrderByReqd; sqlite3SelectDestInit(&dest, eDest, iEph); dest.iSDParm2 = (pPk ? pPk->nKeyCol : -1); sqlite3Select(pParse, pSelect, &dest); @@ -141439,9 +143803,11 @@ SQLITE_PRIVATE void sqlite3Update( memset(&sContext, 0, sizeof(sContext)); db = pParse->db; - if( pParse->nErr || db->mallocFailed ){ + assert( db->pParse==pParse ); + if( pParse->nErr ){ goto update_cleanup; } + assert( db->mallocFailed==0 ); /* Locate the table which we want to update. */ @@ -141454,7 +143820,7 @@ SQLITE_PRIVATE void sqlite3Update( */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, &tmask); - isView = pTab->pSelect!=0; + isView = IsView(pTab); assert( pTrigger || tmask==0 ); #else # define pTrigger 0 @@ -141543,13 +143909,16 @@ SQLITE_PRIVATE void sqlite3Update( */ chngRowid = chngPk = 0; for(i=0; inExpr; i++){ + u8 hCol = sqlite3StrIHash(pChanges->a[i].zEName); /* If this is an UPDATE with a FROM clause, do not resolve expressions ** here. The call to sqlite3Select() below will do that. */ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pTab->aCol[j].zName, pChanges->a[i].zEName)==0 ){ + if( pTab->aCol[j].hName==hCol + && sqlite3StrICmp(pTab->aCol[j].zCnName, pChanges->a[i].zEName)==0 + ){ if( j==pTab->iPKey ){ chngRowid = 1; pRowidExpr = pChanges->a[i].pExpr; @@ -141563,7 +143932,7 @@ SQLITE_PRIVATE void sqlite3Update( testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); sqlite3ErrorMsg(pParse, "cannot UPDATE generated column \"%s\"", - pTab->aCol[j].zName); + pTab->aCol[j].zCnName); goto update_cleanup; } #endif @@ -141587,7 +143956,7 @@ SQLITE_PRIVATE void sqlite3Update( { int rc; rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName, - j<0 ? "ROWID" : pTab->aCol[j].zName, + j<0 ? 
"ROWID" : pTab->aCol[j].zCnName, db->aDb[iDb].zDbSName); if( rc==SQLITE_DENY ){ goto update_cleanup; @@ -141619,8 +143988,10 @@ SQLITE_PRIVATE void sqlite3Update( for(i=0; inCol; i++){ if( aXRef[i]>=0 ) continue; if( (pTab->aCol[i].colFlags & COLFLAG_GENERATED)==0 ) continue; - if( sqlite3ExprReferencesUpdatedColumn(pTab->aCol[i].pDflt, - aXRef, chngRowid) ){ + if( sqlite3ExprReferencesUpdatedColumn( + sqlite3ColumnExpr(pTab, &pTab->aCol[i]), + aXRef, chngRowid) + ){ aXRef[i] = 99999; bProgress = 1; } @@ -141808,7 +144179,7 @@ SQLITE_PRIVATE void sqlite3Update( if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){ flags |= WHERE_ONEPASS_MULTIROW; } - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, flags,iIdxCur); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0,0,0,flags,iIdxCur); if( pWInfo==0 ) goto update_cleanup; /* A one-pass strategy that might update more than one row may not @@ -142208,9 +144579,7 @@ SQLITE_PRIVATE void sqlite3Update( ** that information. */ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); + sqlite3CodeChangeCount(v, regRowCount, "rows updated"); } update_cleanup: @@ -142332,7 +144701,9 @@ static void updateVirtualTable( regRowid = ++pParse->nMem; /* Start scanning the virtual table */ - pWInfo = sqlite3WhereBegin(pParse, pSrc,pWhere,0,0,WHERE_ONEPASS_DESIRED,0); + pWInfo = sqlite3WhereBegin( + pParse, pSrc, pWhere, 0, 0, 0, WHERE_ONEPASS_DESIRED, 0 + ); if( pWInfo==0 ) return; /* Populate the argument registers. */ @@ -142712,7 +145083,7 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate( k = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[i]); sqlite3VdbeAddOp3(v, OP_Column, iCur, k, iPk+i); VdbeComment((v, "%s.%s", pIdx->zName, - pTab->aCol[pPk->aiColumn[i]].zName)); + pTab->aCol[pPk->aiColumn[i]].zCnName)); } sqlite3VdbeVerifyAbortable(v, OE_Abort); i = sqlite3VdbeAddOp4Int(v, OP_Found, iDataCur, 0, iPk, nPk); @@ -142894,8 +145265,8 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( Btree *pTemp; /* The temporary database we vacuum into */ u32 saved_mDbFlags; /* Saved value of db->mDbFlags */ u64 saved_flags; /* Saved value of db->flags */ - int saved_nChange; /* Saved value of db->nChange */ - int saved_nTotalChange; /* Saved value of db->nTotalChange */ + i64 saved_nChange; /* Saved value of db->nChange */ + i64 saved_nTotalChange; /* Saved value of db->nTotalChange */ u32 saved_openFlags; /* Saved value of db->openFlags */ u8 saved_mTrace; /* Saved trace settings */ Db *pDb = 0; /* Database to detach at end of vacuum */ @@ -142993,7 +145364,9 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( /* Do not attempt to change the page size for a WAL database */ if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain)) - ==PAGER_JOURNALMODE_WAL ){ + ==PAGER_JOURNALMODE_WAL + && pOut==0 + ){ db->nextPagesize = 0; } @@ -143342,7 +145715,7 @@ SQLITE_PRIVATE void sqlite3VtabLock(VTable *pVTab){ SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){ VTable *pVtab; assert( IsVirtual(pTab) ); - for(pVtab=pTab->pVTable; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); + for(pVtab=pTab->u.vtab.p; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); return pVtab; } @@ -143355,7 +145728,8 @@ SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ assert( db ); assert( pVTab->nRef>0 ); - assert( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ZOMBIE ); + assert( db->eOpenState==SQLITE_STATE_OPEN + || 
db->eOpenState==SQLITE_STATE_ZOMBIE ); pVTab->nRef--; if( pVTab->nRef==0 ){ @@ -143370,21 +145744,24 @@ SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ /* ** Table p is a virtual table. This function moves all elements in the -** p->pVTable list to the sqlite3.pDisconnect lists of their associated +** p->u.vtab.p list to the sqlite3.pDisconnect lists of their associated ** database connections to be disconnected at the next opportunity. ** Except, if argument db is not NULL, then the entry associated with -** connection db is left in the p->pVTable list. +** connection db is left in the p->u.vtab.p list. */ static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){ VTable *pRet = 0; - VTable *pVTable = p->pVTable; - p->pVTable = 0; + VTable *pVTable; + + assert( IsVirtual(p) ); + pVTable = p->u.vtab.p; + p->u.vtab.p = 0; /* Assert that the mutex (if any) associated with the BtShared database ** that contains table p is held by the caller. See header comments ** above function sqlite3VtabUnlockList() for an explanation of why ** this makes it safe to access the sqlite3.pDisconnect list of any - ** database connection that may have an entry in the p->pVTable list. + ** database connection that may have an entry in the p->u.vtab.p list. */ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); @@ -143394,7 +145771,7 @@ static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){ assert( db2 ); if( db2==db ){ pRet = pVTable; - p->pVTable = pRet; + p->u.vtab.p = pRet; pRet->pNext = 0; }else{ pVTable->pNext = db2->pDisconnect; @@ -143422,7 +145799,7 @@ SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p){ assert( sqlite3BtreeHoldsAllMutexes(db) ); assert( sqlite3_mutex_held(db->mutex) ); - for(ppVTab=&p->pVTable; *ppVTab; ppVTab=&(*ppVTab)->pNext){ + for(ppVTab=&p->u.vtab.p; *ppVTab; ppVTab=&(*ppVTab)->pNext){ if( (*ppVTab)->db==db ){ VTable *pVTab = *ppVTab; *ppVTab = pVTab->pNext; @@ -143485,37 +145862,41 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ ** database connection. */ SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){ + assert( IsVirtual(p) ); if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p); - if( p->azModuleArg ){ + if( p->u.vtab.azArg ){ int i; - for(i=0; inModuleArg; i++){ - if( i!=1 ) sqlite3DbFree(db, p->azModuleArg[i]); + for(i=0; iu.vtab.nArg; i++){ + if( i!=1 ) sqlite3DbFree(db, p->u.vtab.azArg[i]); } - sqlite3DbFree(db, p->azModuleArg); + sqlite3DbFree(db, p->u.vtab.azArg); } } /* -** Add a new module argument to pTable->azModuleArg[]. +** Add a new module argument to pTable->u.vtab.azArg[]. ** The string is not copied - the pointer is stored. The ** string will be freed automatically when the table is ** deleted. 
*/ static void addModuleArgument(Parse *pParse, Table *pTable, char *zArg){ - sqlite3_int64 nBytes = sizeof(char *)*(2+pTable->nModuleArg); + sqlite3_int64 nBytes; char **azModuleArg; sqlite3 *db = pParse->db; - if( pTable->nModuleArg+3>=db->aLimit[SQLITE_LIMIT_COLUMN] ){ + + assert( IsVirtual(pTable) ); + nBytes = sizeof(char *)*(2+pTable->u.vtab.nArg); + if( pTable->u.vtab.nArg+3>=db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", pTable->zName); } - azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes); + azModuleArg = sqlite3DbRealloc(db, pTable->u.vtab.azArg, nBytes); if( azModuleArg==0 ){ sqlite3DbFree(db, zArg); }else{ - int i = pTable->nModuleArg++; + int i = pTable->u.vtab.nArg++; azModuleArg[i] = zArg; azModuleArg[i+1] = 0; - pTable->azModuleArg = azModuleArg; + pTable->u.vtab.azArg = azModuleArg; } } @@ -143538,10 +145919,11 @@ SQLITE_PRIVATE void sqlite3VtabBeginParse( pTable = pParse->pNewTable; if( pTable==0 ) return; assert( 0==pTable->pIndex ); + pTable->eTabType = TABTYP_VTAB; db = pParse->db; - assert( pTable->nModuleArg==0 ); + assert( pTable->u.vtab.nArg==0 ); addModuleArgument(pParse, pTable, sqlite3NameFromToken(db, pModuleName)); addModuleArgument(pParse, pTable, 0); addModuleArgument(pParse, pTable, sqlite3DbStrDup(db, pTable->zName)); @@ -143558,11 +145940,11 @@ SQLITE_PRIVATE void sqlite3VtabBeginParse( ** sqlite_schema table, has already been made by sqlite3StartTable(). ** The second call, to obtain permission to create the table, is made now. */ - if( pTable->azModuleArg ){ + if( pTable->u.vtab.azArg ){ int iDb = sqlite3SchemaToIndex(db, pTable->pSchema); assert( iDb>=0 ); /* The database the table is being created in */ sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName, - pTable->azModuleArg[0], pParse->db->aDb[iDb].zDbSName); + pTable->u.vtab.azArg[0], pParse->db->aDb[iDb].zDbSName); } #endif } @@ -143590,9 +145972,10 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ sqlite3 *db = pParse->db; /* The database connection */ if( pTab==0 ) return; + assert( IsVirtual(pTab) ); addArgumentToVtab(pParse); pParse->sArg.z = 0; - if( pTab->nModuleArg<1 ) return; + if( pTab->u.vtab.nArg<1 ) return; /* If the CREATE VIRTUAL TABLE statement is being entered for the ** first time (in other words if the virtual table is actually being @@ -143625,7 +146008,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3NestedParse(pParse, - "UPDATE %Q." DFLT_SCHEMA_TABLE " " + "UPDATE %Q." LEGACY_SCHEMA_TABLE " " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " "WHERE rowid=#%d", db->aDb[iDb].zDbSName, @@ -143645,18 +146028,14 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ iReg = ++pParse->nMem; sqlite3VdbeLoadString(v, iReg, pTab->zName); sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg); - } - - /* If we are rereading the sqlite_schema table create the in-memory - ** record of the table. The xConnect() method is not called until - ** the first time the virtual table is used in an SQL statement. This - ** allows a schema that contains virtual tables to be loaded before - ** the required virtual table implementations are registered. */ - else { + }else{ + /* If we are rereading the sqlite_schema table create the in-memory + ** record of the table. 
*/ Table *pOld; Schema *pSchema = pTab->pSchema; const char *zName = pTab->zName; - assert( sqlite3SchemaMutexHeld(db, 0, pSchema) ); + assert( zName!=0 ); + sqlite3MarkAllShadowTablesOf(db, pTab); pOld = sqlite3HashInsert(&pSchema->tblHash, zName, pTab); if( pOld ){ sqlite3OomFault(db); @@ -143707,13 +146086,16 @@ static int vtabCallConstructor( VtabCtx sCtx; VTable *pVTable; int rc; - const char *const*azArg = (const char *const*)pTab->azModuleArg; - int nArg = pTab->nModuleArg; + const char *const*azArg; + int nArg = pTab->u.vtab.nArg; char *zErr = 0; char *zModuleName; int iDb; VtabCtx *pCtx; + assert( IsVirtual(pTab) ); + azArg = (const char *const*)pTab->u.vtab.azArg; + /* Check that the virtual-table is not already being initialized */ for(pCtx=db->pVtabCtx; pCtx; pCtx=pCtx->pPrior){ if( pCtx->pTab==pTab ){ @@ -143740,7 +146122,7 @@ static int vtabCallConstructor( pVTable->eVtabRisk = SQLITE_VTABRISK_Normal; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - pTab->azModuleArg[1] = db->aDb[iDb].zDbSName; + pTab->u.vtab.azArg[1] = db->aDb[iDb].zDbSName; /* Invoke the virtual table constructor */ assert( &db->pVtabCtx ); @@ -143779,12 +146161,12 @@ static int vtabCallConstructor( int iCol; u16 oooHidden = 0; /* If everything went according to plan, link the new VTable structure - ** into the linked list headed by pTab->pVTable. Then loop through the + ** into the linked list headed by pTab->u.vtab.p. Then loop through the ** columns of the table to see if any of them contain the token "hidden". ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from ** the type string. */ - pVTable->pNext = pTab->pVTable; - pTab->pVTable = pVTable; + pVTable->pNext = pTab->u.vtab.p; + pTab->u.vtab.p = pVTable; for(iCol=0; iColnCol; iCol++){ char *zType = sqlite3ColumnType(&pTab->aCol[iCol], ""); @@ -143837,16 +146219,17 @@ SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){ int rc; assert( pTab ); - if( !IsVirtual(pTab) || sqlite3GetVTable(db, pTab) ){ + assert( IsVirtual(pTab) ); + if( sqlite3GetVTable(db, pTab) ){ return SQLITE_OK; } /* Locate the required virtual table module */ - zMod = pTab->azModuleArg[0]; + zMod = pTab->u.vtab.azArg[0]; pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); if( !pMod ){ - const char *zModule = pTab->azModuleArg[0]; + const char *zModule = pTab->u.vtab.azArg[0]; sqlite3ErrorMsg(pParse, "no such module: %s", zModule); rc = SQLITE_ERROR; }else{ @@ -143909,10 +146292,10 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, const char *zMod; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName); - assert( pTab && IsVirtual(pTab) && !pTab->pVTable ); + assert( pTab && IsVirtual(pTab) && !pTab->u.vtab.p ); /* Locate the required virtual table module */ - zMod = pTab->azModuleArg[0]; + zMod = pTab->u.vtab.azArg[0]; pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); /* If the module has been registered and includes a Create method, @@ -143947,8 +146330,8 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ VtabCtx *pCtx; int rc = SQLITE_OK; Table *pTab; - char *zErr = 0; Parse sParse; + int initBusy; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ @@ -143965,20 +146348,27 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ pTab = pCtx->pTab; assert( IsVirtual(pTab) ); - memset(&sParse, 0, sizeof(sParse)); + sqlite3ParseObjectInit(&sParse, db); sParse.eParseMode = PARSE_MODE_DECLARE_VTAB; - sParse.db = db; + 
sParse.disableTriggers = 1; + /* We should never be able to reach this point while loading the + ** schema. Nevertheless, defend against that (turn off db->init.busy) + ** in case a bug arises. */ + assert( db->init.busy==0 ); + initBusy = db->init.busy; + db->init.busy = 0; sParse.nQueryLoop = 1; - if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable, &zErr) - && sParse.pNewTable - && !db->mallocFailed - && !sParse.pNewTable->pSelect - && !IsVirtual(sParse.pNewTable) + if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) + && ALWAYS(sParse.pNewTable!=0) + && ALWAYS(!db->mallocFailed) + && IsOrdinaryTable(sParse.pNewTable) ){ + assert( sParse.zErrMsg==0 ); if( !pTab->aCol ){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; + sqlite3ExprListDelete(db, pNew->u.tab.pDfltList); pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); pNew->nCol = 0; @@ -144003,8 +146393,9 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ } pCtx->bDeclared = 1; }else{ - sqlite3ErrorWithMsg(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr); - sqlite3DbFree(db, zErr); + sqlite3ErrorWithMsg(db, SQLITE_ERROR, + (sParse.zErrMsg ? "%s" : 0), sParse.zErrMsg); + sqlite3DbFree(db, sParse.zErrMsg); rc = SQLITE_ERROR; } sParse.eParseMode = PARSE_MODE_NORMAL; @@ -144013,7 +146404,8 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ sqlite3VdbeFinalize(sParse.pVdbe); } sqlite3DeleteTable(db, sParse.pNewTable); - sqlite3ParserReset(&sParse); + sqlite3ParseObjectReset(&sParse); + db->init.busy = initBusy; assert( (rc&0xff)==rc ); rc = sqlite3ApiExit(db, rc); @@ -144033,10 +146425,13 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab Table *pTab; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName); - if( pTab!=0 && ALWAYS(pTab->pVTable!=0) ){ + if( ALWAYS(pTab!=0) + && ALWAYS(IsVirtual(pTab)) + && ALWAYS(pTab->u.vtab.p!=0) + ){ VTable *p; int (*xDestroy)(sqlite3_vtab *); - for(p=pTab->pVTable; p; p=p->pNext){ + for(p=pTab->u.vtab.p; p; p=p->pNext){ assert( p->pVtab ); if( p->pVtab->nRef>0 ){ return SQLITE_LOCKED; @@ -144050,9 +146445,9 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab rc = xDestroy(p->pVtab); /* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */ if( rc==SQLITE_OK ){ - assert( pTab->pVTable==p && p->pNext==0 ); + assert( pTab->u.vtab.p==p && p->pNext==0 ); p->pVtab = 0; - pTab->pVTable = 0; + pTab->u.vtab.p = 0; sqlite3VtabUnlock(p); } sqlite3DeleteTable(db, pTab); @@ -144266,6 +146661,7 @@ SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction( /* Check to see the left operand is a column in a virtual table */ if( NEVER(pExpr==0) ) return pDef; if( pExpr->op!=TK_COLUMN ) return pDef; + assert( ExprUseYTab(pExpr) ); pTab = pExpr->y.pTab; if( pTab==0 ) return pDef; if( !IsVirtual(pTab) ) return pDef; @@ -144340,8 +146736,9 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ /* ** Check to see if virtual table module pMod can be have an eponymous ** virtual table instance. If it can, create one if one does not already -** exist. Return non-zero if the eponymous virtual table instance exists -** when this routine returns, and return zero if it does not exist. +** exist. Return non-zero if either the eponymous virtual table instance +** exists when this routine returns or if an attempt to create it failed +** and an error message was left in pParse. 
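The rewritten sqlite3_declare_vtab() above is reached from a module's xCreate/xConnect method; the hunk only changes how the nested parse is set up (sqlite3ParseObjectInit(), the saved init.busy flag, and the error text now coming from sParse.zErrMsg). A minimal sketch of the calling side, using a hypothetical demo module with made-up column names, for orientation while reading the hunk:

static int demoConnect(
  sqlite3 *db,
  void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab,
  char **pzErr
){
  /* Declare the schema of the virtual table; this call is the entry
  ** point into the sqlite3_declare_vtab() code shown above. */
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b, c)");
  if( rc!=SQLITE_OK ) return rc;
  *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
  if( *ppVtab==0 ) return SQLITE_NOMEM;
  memset(*ppVtab, 0, sizeof(**ppVtab));
  return SQLITE_OK;
}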
** ** An eponymous virtual table instance is one that is named after its ** module, and more importantly, does not require a CREATE VIRTUAL TABLE @@ -144368,8 +146765,9 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ } pMod->pEpoTab = pTab; pTab->nTabRef = 1; + pTab->eTabType = TABTYP_VTAB; pTab->pSchema = db->aDb[0].pSchema; - assert( pTab->nModuleArg==0 ); + assert( pTab->u.vtab.nArg==0 ); pTab->iPKey = -1; pTab->tabFlags |= TF_Eponymous; addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName)); @@ -144380,7 +146778,6 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ sqlite3ErrorMsg(pParse, "%s", zErr); sqlite3DbFree(db, zErr); sqlite3VtabEponymousTableClear(db, pMod); - return 0; } return 1; } @@ -144559,6 +146956,7 @@ struct WhereLevel { u32 iLikeRepCntr; /* LIKE range processing counter register (times 2) */ int addrLikeRep; /* LIKE range processing address */ #endif + int regFilter; /* Bloom filter */ u8 iFrom; /* Which entry in the FROM clause */ u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */ int p1, p2; /* Operands of the opcode used to end the loop */ @@ -144573,7 +146971,7 @@ struct WhereLevel { u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */ } *aInLoop; /* Information about each nested IN operator */ } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */ - Index *pCovidx; /* Possible covering index for WHERE_MULTI_OR */ + Index *pCoveringIdx; /* Possible covering index for WHERE_MULTI_OR */ } u; struct WhereLoop *pWLoop; /* The selected WhereLoop object */ Bitmask notReady; /* FROM entries not usable at this level */ @@ -144617,10 +147015,12 @@ struct WhereLoop { } btree; struct { /* Information for virtual tables */ int idxNum; /* Index number */ - u8 needFree; /* True if sqlite3_free(idxStr) is needed */ + u32 needFree : 1; /* True if sqlite3_free(idxStr) is needed */ + u32 bOmitOffset : 1; /* True to let virtual table handle offset */ i8 isOrdered; /* True if satisfies ORDER BY */ u16 omitMask; /* Terms that may be omitted */ char *idxStr; /* Index identifier string */ + u32 mHandleIn; /* Terms to handle as IN(...) 
instead of == */ } vtab; } u; u32 wsFlags; /* WHERE_* flags describing the plan */ @@ -144764,7 +147164,7 @@ struct WhereTerm { #define TERM_COPIED 0x0008 /* Has a child */ #define TERM_ORINFO 0x0010 /* Need to free the WhereTerm.u.pOrInfo object */ #define TERM_ANDINFO 0x0020 /* Need to free the WhereTerm.u.pAndInfo obj */ -#define TERM_OR_OK 0x0040 /* Used during OR-clause processing */ +#define TERM_OK 0x0040 /* Used during OR-clause processing */ #define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */ #define TERM_LIKEOPT 0x0100 /* Virtual terms from the LIKE optimization */ #define TERM_LIKECOND 0x0200 /* Conditionally this LIKE operator term */ @@ -144777,6 +147177,7 @@ struct WhereTerm { #else # define TERM_HIGHTRUTH 0 /* Only used with STAT4 */ #endif +#define TERM_SLICE 0x8000 /* One slice of a row-value/vector comparison */ /* ** An instance of the WhereScan object is used as an iterator for locating @@ -144787,11 +147188,11 @@ struct WhereScan { WhereClause *pWC; /* WhereClause currently being scanned */ const char *zCollName; /* Required collating sequence, if not NULL */ Expr *pIdxExpr; /* Search for this index expression */ + int k; /* Resume scanning at this->pWC->a[this->k] */ + u32 opMask; /* Acceptable operators */ char idxaff; /* Must match this affinity, if zCollName!=NULL */ + unsigned char iEquiv; /* Current slot in aiCur[] and aiColumn[] */ unsigned char nEquiv; /* Number of entries in aiCur[] and aiColumn[] */ - unsigned char iEquiv; /* Next unused slot in aiCur[] and aiColumn[] */ - u32 opMask; /* Acceptable operators */ - int k; /* Resume scanning at this->pWC->a[this->k] */ int aiCur[11]; /* Cursors in the equivalence class */ i16 aiColumn[11]; /* Corresponding column number in the eq-class */ }; @@ -144815,6 +147216,7 @@ struct WhereClause { u8 hasOr; /* True if any a[].eOperator is WO_OR */ int nTerm; /* Number of terms */ int nSlot; /* Number of entries in a[] */ + int nBase; /* Number of terms through the last non-Virtual */ WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ #if defined(SQLITE_SMALL_STACK) WhereTerm aStatic[1]; /* Initial static space for a[] */ @@ -144872,11 +147274,6 @@ struct WhereMaskSet { int ix[BMS]; /* Cursor assigned to each bit */ }; -/* -** Initialize a WhereMaskSet object -*/ -#define initMaskSet(P) (P)->n=0 - /* ** This object is a convenience wrapper holding all information needed ** to construct WhereLoop objects for a particular query. 
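The new u.vtab.mHandleIn and bOmitOffset fields above record decisions a virtual table makes in xBestIndex: which IN(...) constraints it will consume as a whole list, and whether it will apply OFFSET itself. A rough sketch of the module side, assuming the sqlite3_vtab_in() interface that accompanies these fields; the column number and idxNum below are hypothetical:

static int demoBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
  int i;
  for(i=0; i<pInfo->nConstraint; i++){
    const struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i];
    if( !pCons->usable ) continue;
    if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && pCons->iColumn==0 ){
      pInfo->aConstraintUsage[i].argvIndex = 1;
      pInfo->aConstraintUsage[i].omit = 1;  /* contributes to u.vtab.omitMask */
      /* IN(...) terms reach xBestIndex as EQ constraints. */
      if( sqlite3_vtab_in(pInfo, i, 1) ){
        /* The constraint is an IN(...) that will now be passed to xFilter
        ** as a single argv[] value, to be walked with
        ** sqlite3_vtab_in_first()/sqlite3_vtab_in_next(); the planner
        ** records the choice in u.vtab.mHandleIn. */
      }
      pInfo->idxNum = 1;
      break;
    }
  }
  return SQLITE_OK;
}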
@@ -144884,7 +147281,6 @@ struct WhereMaskSet { struct WhereLoopBuilder { WhereInfo *pWInfo; /* Information about this WHERE */ WhereClause *pWC; /* WHERE clause terms */ - ExprList *pOrderBy; /* ORDER BY clause */ WhereLoop *pNew; /* Template WhereLoop */ WhereOrSet *pOrSet; /* Record best loops here, if not NULL */ #ifdef SQLITE_ENABLE_STAT4 @@ -144952,6 +147348,9 @@ struct WhereInfo { ExprList *pOrderBy; /* The ORDER BY clause or NULL */ ExprList *pResultSet; /* Result set of the query */ Expr *pWhere; /* The complete WHERE clause */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + Select *pLimit; /* Used to access LIMIT expr/registers for vtabs */ +#endif int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ @@ -145005,8 +147404,14 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ); +SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( + const Parse *pParse, /* Parse context */ + const WhereInfo *pWInfo, /* WHERE clause */ + const WhereLevel *pLevel /* Bloom filter on this level */ +); #else # define sqlite3WhereExplainOneScan(u,v,w,x) 0 +# define sqlite3WhereExplainBloomFilter(u,v,w) 0 #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3WhereAddScanStatus( @@ -145031,6 +147436,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*); SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*); SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8); +SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause*, Select*); SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*); @@ -145099,6 +147505,9 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); #define WHERE_BIGNULL_SORT 0x00080000 /* Column nEq of index is BIGNULL */ #define WHERE_IN_SEEKSCAN 0x00100000 /* Seek-scan optimization for IN */ #define WHERE_TRANSCONS 0x00200000 /* Uses a transitive constraint */ +#define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ +#define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ +#define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -145114,7 +147523,7 @@ static const char *explainIndexColumnName(Index *pIdx, int i){ i = pIdx->aiColumn[i]; if( i==XN_EXPR ) return ""; if( i==XN_ROWID ) return "rowid"; - return pIdx->pTable->aCol[i].zName; + return pIdx->pTable->aCol[i].zCnName; } /* @@ -145261,19 +147670,27 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( explainIndexRange(&str, pLoop); } }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){ - const char *zRangeOp; + char cRangeOp; +#if 0 /* Better output, but breaks many tests */ + const Table *pTab = pItem->pTab; + const char *zRowid = pTab->iPKey>=0 ? pTab->aCol[pTab->iPKey].zCnName: + "rowid"; +#else + const char *zRowid = "rowid"; +#endif + sqlite3_str_appendf(&str, " USING INTEGER PRIMARY KEY (%s", zRowid); if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){ - zRangeOp = "="; + cRangeOp = '='; }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){ - zRangeOp = ">? AND rowid<"; + sqlite3_str_appendf(&str, ">? 
AND %s", zRowid); + cRangeOp = '<'; }else if( flags&WHERE_BTM_LIMIT ){ - zRangeOp = ">"; + cRangeOp = '>'; }else{ assert( flags&WHERE_TOP_LIMIT); - zRangeOp = "<"; + cRangeOp = '<'; } - sqlite3_str_appendf(&str, - " USING INTEGER PRIMARY KEY (rowid%s?)",zRangeOp); + sqlite3_str_appendf(&str, "%c?)", cRangeOp); } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( (flags & WHERE_VIRTUALTABLE)!=0 ){ @@ -145296,6 +147713,56 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( } return ret; } + +/* +** Add a single OP_Explain opcode that describes a Bloom filter. +** +** Or if not processing EXPLAIN QUERY PLAN and not in a SQLITE_DEBUG and/or +** SQLITE_ENABLE_STMT_SCANSTATUS build, then OP_Explain opcodes are not +** required and this routine is a no-op. +** +** If an OP_Explain opcode is added to the VM, its address is returned. +** Otherwise, if no OP_Explain is coded, zero is returned. +*/ +SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( + const Parse *pParse, /* Parse context */ + const WhereInfo *pWInfo, /* WHERE clause */ + const WhereLevel *pLevel /* Bloom filter on this level */ +){ + int ret = 0; + SrcItem *pItem = &pWInfo->pTabList->a[pLevel->iFrom]; + Vdbe *v = pParse->pVdbe; /* VM being constructed */ + sqlite3 *db = pParse->db; /* Database handle */ + char *zMsg; /* Text to add to EQP output */ + int i; /* Loop counter */ + WhereLoop *pLoop; /* The where loop */ + StrAccum str; /* EQP output string */ + char zBuf[100]; /* Initial space for EQP output string */ + + sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH); + str.printfFlags = SQLITE_PRINTF_INTERNAL; + sqlite3_str_appendf(&str, "BLOOM FILTER ON %S (", pItem); + pLoop = pLevel->pWLoop; + if( pLoop->wsFlags & WHERE_IPK ){ + const Table *pTab = pItem->pTab; + if( pTab->iPKey>=0 ){ + sqlite3_str_appendf(&str, "%s=?", pTab->aCol[pTab->iPKey].zCnName); + }else{ + sqlite3_str_appendf(&str, "rowid=?"); + } + }else{ + for(i=pLoop->nSkip; iu.btree.nEq; i++){ + const char *z = explainIndexColumnName(pLoop->u.btree.pIndex, i); + if( i>pLoop->nSkip ) sqlite3_str_append(&str, " AND ", 5); + sqlite3_str_appendf(&str, "%s=?", z); + } + } + sqlite3_str_append(&str, ")", 1); + zMsg = sqlite3StrAccumFinish(&str); + ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), + pParse->addrExplain, 0, zMsg,P4_DYNAMIC); + return ret; +} #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS @@ -145501,16 +147968,23 @@ static Expr *removeUnindexableInClauseTerms( Expr *pNew; pNew = sqlite3ExprDup(db, pX, 0); if( db->mallocFailed==0 ){ - ExprList *pOrigRhs = pNew->x.pSelect->pEList; /* Original unmodified RHS */ - ExprList *pOrigLhs = pNew->pLeft->x.pList; /* Original unmodified LHS */ + ExprList *pOrigRhs; /* Original unmodified RHS */ + ExprList *pOrigLhs; /* Original unmodified LHS */ ExprList *pRhs = 0; /* New RHS after modifications */ ExprList *pLhs = 0; /* New LHS after mods */ int i; /* Loop counter */ Select *pSelect; /* Pointer to the SELECT on the RHS */ + assert( ExprUseXSelect(pNew) ); + pOrigRhs = pNew->x.pSelect->pEList; + assert( pNew->pLeft!=0 ); + assert( ExprUseXList(pNew->pLeft) ); + pOrigLhs = pNew->pLeft->x.pList; for(i=iEq; inLTerm; i++){ if( pLoop->aLTerm[i]->pExpr==pX ){ - int iField = pLoop->aLTerm[i]->u.x.iField - 1; + int iField; + assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); + iField = pLoop->aLTerm[i]->u.x.iField - 1; if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); 
pOrigRhs->a[iField].pExpr = 0; @@ -145625,7 +148099,7 @@ static int codeEqualityTerm( } iTab = 0; - if( (pX->flags & EP_xIsSelect)==0 || pX->x.pSelect->pEList->nExpr==1 ){ + if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); }else{ sqlite3 *db = pParse->db; @@ -145647,8 +148121,8 @@ static int codeEqualityTerm( sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); VdbeCoverageIf(v, bRev); VdbeCoverageIf(v, !bRev); - assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); + assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); pLoop->wsFlags |= WHERE_IN_ABLE; if( pLevel->u.in.nIn==0 ){ pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); @@ -145812,6 +148286,7 @@ static int codeAllEqualityTerms( VdbeCoverageIf(v, bRev!=0); VdbeComment((v, "begin skip-scan on %s", pIdx->zName)); j = sqlite3VdbeAddOp0(v, OP_Goto); + assert( pLevel->addrSkip==0 ); pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT), iIdxCur, 0, regBase, nSkip); VdbeCoverageIf(v, bRev==0); @@ -145844,6 +148319,9 @@ static int codeAllEqualityTerms( sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j); } } + } + for(j=nSkip; jaLTerm[j]; if( pTerm->eOperator & WO_IN ){ if( pTerm->pExpr->flags & EP_xIsSelect ){ /* No affinity ever needs to be (or should be) applied to a value @@ -145858,7 +148336,8 @@ static int codeAllEqualityTerms( sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); VdbeCoverage(v); } - if( pParse->db->mallocFailed==0 && pParse->nErr==0 ){ + if( pParse->nErr==0 ){ + assert( pParse->db->mallocFailed==0 ); if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){ zAff[j] = SQLITE_AFF_BLOB; } @@ -146048,7 +148527,7 @@ static void codeCursorHint( sWalker.pParse = pParse; sWalker.u.pCCurHint = &sHint; pWC = &pWInfo->sWC; - for(i=0; inTerm; i++){ + for(i=0; inBase; i++){ pTerm = &pWC->a[i]; if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( pTerm->prereqAll & pLevel->notReady ) continue; @@ -146078,7 +148557,7 @@ static void codeCursorHint( if( pTabItem->fg.jointype & JT_LEFT ){ Expr *pExpr = pTerm->pExpr; if( !ExprHasProperty(pExpr, EP_FromJoin) - || pExpr->iRightJoinTable!=pTabItem->iCursor + || pExpr->w.iRightJoinTable!=pTabItem->iCursor ){ sWalker.eCode = 0; sWalker.xExprCallback = codeCursorHintIsOrFunction; @@ -146190,7 +148669,7 @@ static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){ assert( nReg>0 ); if( p && sqlite3ExprIsVector(p) ){ #ifndef SQLITE_OMIT_SUBQUERY - if( (p->flags & EP_xIsSelect) ){ + if( ExprUseXSelect(p) ){ Vdbe *v = pParse->pVdbe; int iSelect; assert( p->op==TK_SELECT ); @@ -146200,7 +148679,9 @@ static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){ #endif { int i; - ExprList *pList = p->x.pList; + const ExprList *pList; + assert( ExprUseXList(p) ); + pList = p->x.pList; assert( nReg<=pList->nExpr ); for(i=0; ia[i].pExpr, iReg+i); @@ -146248,15 +148729,16 @@ static void preserveExpr(IdxExprTrans *pTrans, Expr *pExpr){ static int whereIndexExprTransNode(Walker *p, Expr *pExpr){ IdxExprTrans *pX = p->u.pIdxTrans; if( sqlite3ExprCompare(0, pExpr, pX->pIdxExpr, pX->iTabCur)==0 ){ + pExpr = sqlite3ExprSkipCollate(pExpr); preserveExpr(pX, pExpr); pExpr->affExpr = sqlite3ExprAffinity(pExpr); pExpr->op = TK_COLUMN; pExpr->iTable = pX->iIdxCur; pExpr->iColumn = pX->iIdxCol; - pExpr->y.pTab = 0; testcase( ExprHasProperty(pExpr, EP_Skip) ); testcase( ExprHasProperty(pExpr, EP_Unlikely) ); - ExprClearProperty(pExpr, EP_Skip|EP_Unlikely); + ExprClearProperty(pExpr, 
EP_Skip|EP_Unlikely|EP_WinFunc|EP_Subrtn); + pExpr->y.pTab = 0; return WRC_Prune; }else{ return WRC_Continue; @@ -146271,7 +148753,7 @@ static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){ if( pExpr->op==TK_COLUMN ){ IdxExprTrans *pX = p->u.pIdxTrans; if( pExpr->iTable==pX->iTabCur && pExpr->iColumn==pX->iTabCol ){ - assert( pExpr->y.pTab!=0 ); + assert( ExprUseYTab(pExpr) && pExpr->y.pTab!=0 ); preserveExpr(pX, pExpr); pExpr->affExpr = sqlite3TableColumnAffinity(pExpr->y.pTab,pExpr->iColumn); pExpr->iTable = pX->iIdxCur; @@ -146319,15 +148801,16 @@ static void whereIndexExprTrans( for(iIdxCol=0; iIdxColnColumn; iIdxCol++){ i16 iRef = pIdx->aiColumn[iIdxCol]; if( iRef==XN_EXPR ){ - assert( aColExpr->a[iIdxCol].pExpr!=0 ); + assert( aColExpr!=0 && aColExpr->a[iIdxCol].pExpr!=0 ); x.pIdxExpr = aColExpr->a[iIdxCol].pExpr; if( sqlite3ExprIsConstant(x.pIdxExpr) ) continue; w.xExprCallback = whereIndexExprTransNode; #ifndef SQLITE_OMIT_GENERATED_COLUMNS }else if( iRef>=0 && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0 - && (pTab->aCol[iRef].zColl==0 - || sqlite3StrICmp(pTab->aCol[iRef].zColl, sqlite3StrBINARY)==0) + && ((pTab->aCol[iRef].colFlags & COLFLAG_HASCOLL)==0 + || sqlite3StrICmp(sqlite3ColumnColl(&pTab->aCol[iRef]), + sqlite3StrBINARY)==0) ){ /* Check to see if there are direct references to generated columns ** that are contained in the index. Pulling the generated column @@ -146376,6 +148859,68 @@ static void whereApplyPartialIndexConstraints( } } +/* +** This routine is called right after An OP_Filter has been generated and +** before the corresponding index search has been performed. This routine +** checks to see if there are additional Bloom filters in inner loops that +** can be checked prior to doing the index lookup. If there are available +** inner-loop Bloom filters, then evaluate those filters now, before the +** index lookup. The idea is that a Bloom filter check is way faster than +** an index lookup, and the Bloom filter might return false, meaning that +** the index lookup can be skipped. +** +** We know that an inner loop uses a Bloom filter because it has the +** WhereLevel.regFilter set. If an inner-loop Bloom filter is checked, +** then clear the WhereLevel.regFilter value to prevent the Bloom filter +** from being checked a second time when the inner loop is evaluated. +*/ +static SQLITE_NOINLINE void filterPullDown( + Parse *pParse, /* Parsing context */ + WhereInfo *pWInfo, /* Complete information about the WHERE clause */ + int iLevel, /* Which level of pWInfo->a[] should be coded */ + int addrNxt, /* Jump here to bypass inner loops */ + Bitmask notReady /* Loops that are not ready */ +){ + while( ++iLevel < pWInfo->nLevel ){ + WhereLevel *pLevel = &pWInfo->a[iLevel]; + WhereLoop *pLoop = pLevel->pWLoop; + if( pLevel->regFilter==0 ) continue; + if( pLevel->pWLoop->nSkip ) continue; + /* ,--- Because sqlite3ConstructBloomFilter() has will not have set + ** vvvvv--' pLevel->regFilter if this were true. 
*/ + if( NEVER(pLoop->prereq & notReady) ) continue; + assert( pLevel->addrBrk==0 ); + pLevel->addrBrk = addrNxt; + if( pLoop->wsFlags & WHERE_IPK ){ + WhereTerm *pTerm = pLoop->aLTerm[0]; + int regRowid; + assert( pTerm!=0 ); + assert( pTerm->pExpr!=0 ); + testcase( pTerm->wtFlags & TERM_VIRTUAL ); + regRowid = sqlite3GetTempReg(pParse); + regRowid = codeEqualityTerm(pParse, pTerm, pLevel, 0, 0, regRowid); + sqlite3VdbeAddOp4Int(pParse->pVdbe, OP_Filter, pLevel->regFilter, + addrNxt, regRowid, 1); + VdbeCoverage(pParse->pVdbe); + }else{ + u16 nEq = pLoop->u.btree.nEq; + int r1; + char *zStartAff; + + assert( pLoop->wsFlags & WHERE_INDEXED ); + assert( (pLoop->wsFlags & WHERE_COLUMN_IN)==0 ); + r1 = codeAllEqualityTerms(pParse,pLevel,0,0,&zStartAff); + codeApplyAffinity(pParse, r1, nEq, zStartAff); + sqlite3DbFree(pParse->db, zStartAff); + sqlite3VdbeAddOp4Int(pParse->pVdbe, OP_Filter, pLevel->regFilter, + addrNxt, r1, nEq); + VdbeCoverage(pParse->pVdbe); + } + pLevel->regFilter = 0; + pLevel->addrBrk = 0; + } +} + /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. @@ -146478,7 +149023,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int iReg; /* P3 Value for OP_VFilter */ int addrNotFound; int nConstraint = pLoop->nLTerm; - int iIn; /* Counter for IN constraints */ iReg = sqlite3GetTempRange(pParse, nConstraint+2); addrNotFound = pLevel->addrBrk; @@ -146487,11 +149031,27 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pTerm = pLoop->aLTerm[j]; if( NEVER(pTerm==0) ) continue; if( pTerm->eOperator & WO_IN ){ - codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget); - addrNotFound = pLevel->addrNxt; + if( SMASKBIT32(j) & pLoop->u.vtab.mHandleIn ){ + int iTab = pParse->nTab++; + int iCache = ++pParse->nMem; + sqlite3CodeRhsOfIN(pParse, pTerm->pExpr, iTab); + sqlite3VdbeAddOp3(v, OP_VInitIn, iTab, iTarget, iCache); + }else{ + codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget); + addrNotFound = pLevel->addrNxt; + } }else{ Expr *pRight = pTerm->pExpr->pRight; codeExprOrVector(pParse, pRight, iTarget, 1); + if( pTerm->eMatchOp==SQLITE_INDEX_CONSTRAINT_OFFSET + && pLoop->u.vtab.bOmitOffset + ){ + assert( pTerm->eOperator==WO_AUX ); + assert( pWInfo->pLimit!=0 ); + assert( pWInfo->pLimit->iOffset>0 ); + sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pLimit->iOffset); + VdbeComment((v,"Zero OFFSET counter")); + } } } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); @@ -146507,40 +149067,55 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pLevel->p1 = iCur; pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext; pLevel->p2 = sqlite3VdbeCurrentAddr(v); - iIn = pLevel->u.in.nIn; - for(j=nConstraint-1; j>=0; j--){ + assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); + + for(j=0; jaLTerm[j]; - if( (pTerm->eOperator & WO_IN)!=0 ) iIn--; if( j<16 && (pLoop->u.vtab.omitMask>>j)&1 ){ disableTerm(pLevel, pTerm); - }else if( (pTerm->eOperator & WO_IN)!=0 - && sqlite3ExprVectorSize(pTerm->pExpr->pLeft)==1 + continue; + } + if( (pTerm->eOperator & WO_IN)!=0 + && (SMASKBIT32(j) & pLoop->u.vtab.mHandleIn)==0 + && !db->mallocFailed ){ Expr *pCompare; /* The comparison operator */ Expr *pRight; /* RHS of the comparison */ VdbeOp *pOp; /* Opcode to access the value of the IN constraint */ + int iIn; /* IN loop corresponding to the j-th constraint */ /* Reload the constraint value into reg[iReg+j+2]. 
The same value ** was loaded into the same register prior to the OP_VFilter, but ** the xFilter implementation might have changed the datatype or - ** encoding of the value in the register, so it *must* be reloaded. */ - assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed ); - if( !db->mallocFailed ){ - assert( iIn>=0 && iInu.in.nIn ); + ** encoding of the value in the register, so it *must* be reloaded. + */ + for(iIn=0; ALWAYS(iInu.in.nIn); iIn++){ pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[iIn].addrInTop); - assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid ); - assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 ); - assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 ); - testcase( pOp->opcode==OP_Rowid ); - sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3); + if( (pOp->opcode==OP_Column && pOp->p3==iReg+j+2) + || (pOp->opcode==OP_Rowid && pOp->p2==iReg+j+2) + ){ + testcase( pOp->opcode==OP_Rowid ); + sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3); + break; + } } /* Generate code that will continue to the next row if - ** the IN constraint is not satisfied */ + ** the IN constraint is not satisfied + */ pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0); - assert( pCompare!=0 || db->mallocFailed ); - if( pCompare ){ - pCompare->pLeft = pTerm->pExpr->pLeft; + if( !db->mallocFailed ){ + int iFld = pTerm->u.x.iField; + Expr *pLeft = pTerm->pExpr->pLeft; + assert( pLeft!=0 ); + if( iFld>0 ){ + assert( pLeft->op==TK_VECTOR ); + assert( ExprUseXList(pLeft) ); + assert( iFld<=pLeft->x.pList->nExpr ); + pCompare->pLeft = pLeft->x.pList->a[iFld-1].pExpr; + }else{ + pCompare->pLeft = pLeft; + } pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0); if( pRight ){ pRight->iTable = iReg+j+2; @@ -146549,11 +149124,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ); } pCompare->pLeft = 0; - sqlite3ExprDelete(db, pCompare); } + sqlite3ExprDelete(db, pCompare); } } - assert( iIn==0 || db->mallocFailed ); + /* These registers need to be preserved in case there is an IN operator ** loop. So we could deallocate the registers here (and potentially ** reuse them later) if (pLoop->wsFlags & WHERE_IN_ABLE)==0. But it seems @@ -146581,12 +149156,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg); if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); addrNxt = pLevel->addrNxt; + if( pLevel->regFilter ){ + sqlite3VdbeAddOp4Int(v, OP_Filter, pLevel->regFilter, addrNxt, + iRowidReg, 1); + VdbeCoverage(v); + filterPullDown(pParse, pWInfo, iLevel, addrNxt, notReady); + } sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg); VdbeCoverage(v); pLevel->op = OP_Noop; - if( (pTerm->prereqAll & pLevel->notReady)==0 ){ - pTerm->wtFlags |= TERM_CODED; - } }else if( (pLoop->wsFlags & WHERE_IPK)!=0 && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0 ){ @@ -146909,6 +149487,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sqlite3VdbeAddOp2(v, OP_Integer, 1, regBignull); VdbeComment((v, "NULL-scan pass ctr")); } + if( pLevel->regFilter ){ + sqlite3VdbeAddOp4Int(v, OP_Filter, pLevel->regFilter, addrNxt, + regBase, nEq); + VdbeCoverage(v); + filterPullDown(pParse, pWInfo, iLevel, addrNxt, notReady); + } op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; assert( op!=0 ); @@ -146957,8 +149541,19 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** range (if any). 
*/ nConstraint = nEq; + assert( pLevel->p2==0 ); if( pRangeEnd ){ Expr *pRight = pRangeEnd->pExpr->pRight; + if( addrSeekScan ){ + /* For a seek-scan that has a range on the lowest term of the index, + ** we have to make the top of the loop be code that sets the end + ** condition of the range. Otherwise, the OP_SeekScan might jump + ** over that initialization, leaving the range-end value set to the + ** range-start value, resulting in a wrong answer. + ** See ticket 5981a8c041a3c2f3 (2021-11-02). + */ + pLevel->p2 = sqlite3VdbeCurrentAddr(v); + } codeExprOrVector(pParse, pRight, regBase+nEq, nTop); whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd); if( (pRangeEnd->wtFlags & TERM_VNULL)==0 @@ -146992,7 +149587,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sqlite3DbFree(db, zEndAff); /* Top of the loop body */ - pLevel->p2 = sqlite3VdbeCurrentAddr(v); + if( pLevel->p2==0 ) pLevel->p2 = sqlite3VdbeCurrentAddr(v); /* Check if the index cursor is past the end of the range. */ if( nConstraint ){ @@ -147225,7 +149820,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y - ** Then for every term xN, evaluate as the subexpression: xN AND z + ** Then for every term xN, evaluate as the subexpression: xN AND y ** That way, terms in y that are factored into the disjunction will ** be picked up by the recursive calls to sqlite3WhereBegin() below. ** @@ -147237,6 +149832,20 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** This optimization also only applies if the (x1 OR x2 OR ...) term ** is not contained in the ON clause of a LEFT JOIN. ** See ticket http://www.sqlite.org/src/info/f2369304e4 + ** + ** 2022-02-04: Do not push down slices of a row-value comparison. + ** In other words, "w" or "y" may not be a slice of a vector. Otherwise, + ** the initialization of the right-hand operand of the vector comparison + ** might not occur, or might occur only in an OR branch that is not + ** taken. dbsqlfuzz 80a9fade844b4fb43564efc972bcb2c68270f5d1. + ** + ** 2022-03-03: Do not push down expressions that involve subqueries. + ** The subquery might get coded as a subroutine. Any table-references + ** in the subquery might be resolved to index-references for the index on + ** the OR branch in which the subroutine is coded. But if the subroutine + ** is invoked from a different OR branch that uses a different index, such + ** index-references will not work. tag-20220303a + ** https://sqlite.org/forum/forumpost/36937b197273d403 */ if( pWC->nTerm>1 ){ int iTerm; @@ -147245,9 +149854,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( &pWC->a[iTerm] == pTerm ) continue; testcase( pWC->a[iTerm].wtFlags & TERM_VIRTUAL ); testcase( pWC->a[iTerm].wtFlags & TERM_CODED ); - if( (pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_CODED))!=0 ) continue; + testcase( pWC->a[iTerm].wtFlags & TERM_SLICE ); + if( (pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_CODED|TERM_SLICE))!=0 ){ + continue; + } if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue; - testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO ); + if( ExprHasProperty(pExpr, EP_Subquery) ) continue; /* tag-20220303a */ pExpr = sqlite3ExprDup(db, pExpr, 0); pAndExpr = sqlite3ExprAnd(pParse, pAndExpr, pExpr); } @@ -147288,9 +149900,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* Loop through table entries that match term pOrTerm. 
*/ ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1)); WHERETRACE(0xffff, ("Subplan for OR-clause:\n")); - pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, + pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, 0, WHERE_OR_SUBCLAUSE, iCovCur); - assert( pSubWInfo || pParse->nErr || db->mallocFailed ); + assert( pSubWInfo || pParse->nErr ); if( pSubWInfo ){ WhereLoop *pSubLoop; int addrExplain = sqlite3WhereExplainOneScan( @@ -147399,7 +150011,10 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } } ExplainQueryPlanPop(pParse); - pLevel->u.pCovidx = pCov; + assert( pLevel->pWLoop==pLoop ); + assert( (pLoop->wsFlags & WHERE_MULTI_OR)!=0 ); + assert( (pLoop->wsFlags & WHERE_IN_ABLE)==0 ); + pLevel->u.pCoveringIdx = pCov; if( pCov ) pLevel->iIdxCur = iCovCur; if( pAndExpr ){ pAndExpr->pLeft = 0; @@ -147526,7 +150141,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** then we cannot use the "t1.a=t2.b" constraint, but we can code ** the implied "t1.a=123" constraint. */ - for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ + for(pTerm=pWC->a, j=pWC->nBase; j>0; j--, pTerm++){ Expr *pE, sEAlt; WhereTerm *pAlt; if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; @@ -147543,12 +150158,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( #endif assert( !ExprHasProperty(pE, EP_FromJoin) ); assert( (pTerm->prereqRight & pLevel->notReady)!=0 ); + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.x.leftColumn, notReady, WO_EQ|WO_IN|WO_IS, 0); if( pAlt==0 ) continue; if( pAlt->wtFlags & (TERM_CODED) ) continue; if( (pAlt->eOperator & WO_IN) - && (pAlt->pExpr->flags & EP_xIsSelect) + && ExprUseXSelect(pAlt->pExpr) && (pAlt->pExpr->x.pSelect->pEList->nExpr>1) ){ continue; @@ -147570,7 +150186,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pLevel->addrFirst = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin); VdbeComment((v, "record LEFT JOIN hit")); - for(pTerm=pWC->a, j=0; jnTerm; j++, pTerm++){ + for(pTerm=pWC->a, j=0; jnBase; j++, pTerm++){ testcase( pTerm->wtFlags & TERM_VIRTUAL ); testcase( pTerm->wtFlags & TERM_CODED ); if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; @@ -147681,6 +150297,7 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){ pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]); } pTerm = &pWC->a[idx = pWC->nTerm++]; + if( (wtFlags & TERM_VIRTUAL)==0 ) pWC->nBase = pWC->nTerm; if( p && ExprHasProperty(p, EP_Unlikely) ){ pTerm->truthProb = sqlite3LogEst(p->iTable) - 270; }else{ @@ -147797,6 +150414,7 @@ static int isLikeOrGlob( #ifdef SQLITE_EBCDIC if( *pnoCase ) return 0; #endif + assert( ExprUseXList(pExpr) ); pList = pExpr->x.pList; pLeft = pList->a[1].pExpr; @@ -147812,7 +150430,8 @@ static int isLikeOrGlob( sqlite3VdbeSetVarmask(pParse->pVdbe, iCol); assert( pRight->op==TK_VARIABLE || pRight->op==TK_REGISTER ); }else if( op==TK_STRING ){ - z = (u8*)pRight->u.zToken; + assert( !ExprHasProperty(pRight, EP_IntValue) ); + z = (u8*)pRight->u.zToken; } if( z ){ @@ -147841,7 +150460,9 @@ static int isLikeOrGlob( pPrefix = sqlite3Expr(db, TK_STRING, (char*)z); if( pPrefix ){ int iFrom, iTo; - char *zNew = pPrefix->u.zToken; + char *zNew; + assert( !ExprHasProperty(pPrefix, EP_IntValue) ); + zNew = pPrefix->u.zToken; zNew[cnt] = 0; for(iFrom=iTo=0; iFromop!=TK_COLUMN || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT - || IsVirtual(pLeft->y.pTab) /* Value might be numeric */ + || (ALWAYS( ExprUseYTab(pLeft) ) + && 
pLeft->y.pTab + && IsVirtual(pLeft->y.pTab)) /* Might be numeric */ ){ int isNum; double rDummy; @@ -147893,6 +150516,7 @@ static int isLikeOrGlob( if( op==TK_VARIABLE ){ Vdbe *v = pParse->pVdbe; sqlite3VdbeSetVarmask(v, pRight->iColumn); + assert( !ExprHasProperty(pRight, EP_IntValue) ); if( *pisComplete && pRight->u.zToken[1] ){ /* If the rhs of the LIKE expression is a variable, and the current ** value of the variable means there is no need to invoke the LIKE @@ -147966,6 +150590,7 @@ static int isAuxiliaryVtabOperator( Expr *pCol; /* Column reference */ int i; + assert( ExprUseXList(pExpr) ); pList = pExpr->x.pList; if( pList==0 || pList->nExpr!=2 ){ return 0; @@ -147979,9 +150604,11 @@ static int isAuxiliaryVtabOperator( ** MATCH(expression,vtab_column) */ pCol = pList->a[1].pExpr; + assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); if( ExprIsVtab(pCol) ){ for(i=0; iu.zToken, aOp[i].zOp)==0 ){ *peOp2 = aOp[i].eOp2; *ppRight = pList->a[0].pExpr; @@ -148002,6 +150629,7 @@ static int isAuxiliaryVtabOperator( ** with function names in an arbitrary case. */ pCol = pList->a[0].pExpr; + assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); if( ExprIsVtab(pCol) ){ sqlite3_vtab *pVtab; @@ -148011,6 +150639,7 @@ static int isAuxiliaryVtabOperator( pVtab = sqlite3GetVTable(db, pCol->y.pTab)->pVtab; assert( pVtab!=0 ); assert( pVtab->pModule!=0 ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pMod = (sqlite3_module *)pVtab->pModule; if( pMod->xFindFunction!=0 ){ i = pMod->xFindFunction(pVtab,2, pExpr->u.zToken, &xNotUsed, &pNotUsed); @@ -148026,10 +150655,12 @@ static int isAuxiliaryVtabOperator( int res = 0; Expr *pLeft = pExpr->pLeft; Expr *pRight = pExpr->pRight; + assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 ); if( ExprIsVtab(pLeft) ){ res++; } + assert( pRight==0 || pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 ); if( pRight && ExprIsVtab(pRight) ){ res++; @@ -148053,7 +150684,7 @@ static int isAuxiliaryVtabOperator( static void transferJoinMarkings(Expr *pDerived, Expr *pBase){ if( pDerived ){ pDerived->flags |= pBase->flags & EP_FromJoin; - pDerived->iRightJoinTable = pBase->iRightJoinTable; + pDerived->w.iRightJoinTable = pBase->w.iRightJoinTable; } } @@ -148282,6 +150913,7 @@ static void exprAnalyzeOrTerm( pOrTerm->u.pAndInfo = pAndInfo; pOrTerm->wtFlags |= TERM_ANDINFO; pOrTerm->eOperator = WO_AND; + pOrTerm->leftCursor = -1; pAndWC = &pAndInfo->wc; memset(pAndWC->aStatic, 0, sizeof(pAndWC->aStatic)); sqlite3WhereClauseInit(pAndWC, pWC->pWInfo); @@ -148324,11 +150956,10 @@ static void exprAnalyzeOrTerm( ** empty. */ pOrInfo->indexable = indexable; + pTerm->eOperator = WO_OR; + pTerm->leftCursor = -1; if( indexable ){ - pTerm->eOperator = WO_OR; pWC->hasOr = 1; - }else{ - pTerm->eOperator = WO_OR; } /* For a two-way OR, attempt to implementation case 2. @@ -148383,7 +151014,7 @@ static void exprAnalyzeOrTerm( pOrTerm = pOrWc->a; for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){ assert( pOrTerm->eOperator & WO_EQ ); - pOrTerm->wtFlags &= ~TERM_OR_OK; + pOrTerm->wtFlags &= ~TERM_OK; if( pOrTerm->leftCursor==iCursor ){ /* This is the 2-bit case and we are on the second iteration and ** current term is from the first iteration. So skip this term. 
*/ @@ -148401,6 +151032,7 @@ static void exprAnalyzeOrTerm( assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) ); continue; } + assert( (pOrTerm->eOperator & (WO_OR|WO_AND))==0 ); iColumn = pOrTerm->u.x.leftColumn; iCursor = pOrTerm->leftCursor; pLeft = pOrTerm->pExpr->pLeft; @@ -148421,8 +151053,9 @@ static void exprAnalyzeOrTerm( okToChngToIN = 1; for(; i>=0 && okToChngToIN; i--, pOrTerm++){ assert( pOrTerm->eOperator & WO_EQ ); + assert( (pOrTerm->eOperator & (WO_OR|WO_AND))==0 ); if( pOrTerm->leftCursor!=iCursor ){ - pOrTerm->wtFlags &= ~TERM_OR_OK; + pOrTerm->wtFlags &= ~TERM_OK; }else if( pOrTerm->u.x.leftColumn!=iColumn || (iColumn==XN_EXPR && sqlite3ExprCompare(pParse, pOrTerm->pExpr->pLeft, pLeft, -1) )){ @@ -148438,7 +151071,7 @@ static void exprAnalyzeOrTerm( if( affRight!=0 && affRight!=affLeft ){ okToChngToIN = 0; }else{ - pOrTerm->wtFlags |= TERM_OR_OK; + pOrTerm->wtFlags |= TERM_OK; } } } @@ -148455,8 +151088,9 @@ static void exprAnalyzeOrTerm( Expr *pNew; /* The complete IN operator */ for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){ - if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue; + if( (pOrTerm->wtFlags & TERM_OK)==0 ) continue; assert( pOrTerm->eOperator & WO_EQ ); + assert( (pOrTerm->eOperator & (WO_OR|WO_AND))==0 ); assert( pOrTerm->leftCursor==iCursor ); assert( pOrTerm->u.x.leftColumn==iColumn ); pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0); @@ -148469,7 +151103,7 @@ static void exprAnalyzeOrTerm( if( pNew ){ int idxNew; transferJoinMarkings(pNew, pExpr); - assert( !ExprHasProperty(pNew, EP_xIsSelect) ); + assert( ExprUseXList(pNew) ); pNew->x.pList = pList; idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); @@ -148597,6 +151231,7 @@ static int exprMightBeIndexed( assert( TK_ISop==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){ + assert( ExprUseXList(pExpr) ); pExpr = pExpr->x.pList->a[0].pExpr; } @@ -148654,30 +151289,47 @@ static void exprAnalyze( if( db->mallocFailed ){ return; } + assert( pWC->nTerm > idxTerm ); pTerm = &pWC->a[idxTerm]; pMaskSet = &pWInfo->sMaskSet; pExpr = pTerm->pExpr; + assert( pExpr!=0 ); /* Because malloc() has not failed */ assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE ); + pMaskSet->bVarSelect = 0; prereqLeft = sqlite3WhereExprUsage(pMaskSet, pExpr->pLeft); op = pExpr->op; if( op==TK_IN ){ assert( pExpr->pRight==0 ); if( sqlite3ExprCheckIN(pParse, pExpr) ) return; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect); }else{ pTerm->prereqRight = sqlite3WhereExprListUsage(pMaskSet, pExpr->x.pList); } - }else if( op==TK_ISNULL ){ - pTerm->prereqRight = 0; + prereqAll = prereqLeft | pTerm->prereqRight; }else{ pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight); + if( pExpr->pLeft==0 + || ExprHasProperty(pExpr, EP_xIsSelect|EP_IfNullRow) + || pExpr->x.pList!=0 + ){ + prereqAll = sqlite3WhereExprUsageNN(pMaskSet, pExpr); + }else{ + prereqAll = prereqLeft | pTerm->prereqRight; + } } - pMaskSet->bVarSelect = 0; - prereqAll = sqlite3WhereExprUsageNN(pMaskSet, pExpr); if( pMaskSet->bVarSelect ) pTerm->wtFlags |= TERM_VARSELECT; + +#ifdef SQLITE_DEBUG + if( prereqAll!=sqlite3WhereExprUsageNN(pMaskSet, pExpr) ){ + printf("\n*** Incorrect prereqAll computed for:\n"); + sqlite3TreeViewExpr(0,pExpr,0); + abort(); + } +#endif + if( ExprHasProperty(pExpr, EP_FromJoin) ){ - Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable); + Bitmask x = sqlite3WhereGetMask(pMaskSet, 
pExpr->w.iRightJoinTable); prereqAll |= x; extraRight = x-1; /* ON clause terms may not be used with an index ** on left table of a LEFT JOIN. Ticket #3015 */ @@ -148699,11 +151351,13 @@ static void exprAnalyze( if( pTerm->u.x.iField>0 ){ assert( op==TK_IN ); assert( pLeft->op==TK_VECTOR ); + assert( ExprUseXList(pLeft) ); pLeft = pLeft->x.pList->a[pTerm->u.x.iField-1].pExpr; } if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){ pTerm->leftCursor = aiCurCol[0]; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); pTerm->u.x.leftColumn = aiCurCol[1]; pTerm->eOperator = operatorMask(op) & opMask; } @@ -148741,12 +151395,18 @@ static void exprAnalyze( } pNew->wtFlags |= exprCommute(pParse, pDup); pNew->leftCursor = aiCurCol[0]; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); pNew->u.x.leftColumn = aiCurCol[1]; testcase( (prereqLeft | extraRight) != prereqLeft ); pNew->prereqRight = prereqLeft | extraRight; pNew->prereqAll = prereqAll; pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask; - }else if( op==TK_ISNULL && 0==sqlite3ExprCanBeNull(pLeft) ){ + }else + if( op==TK_ISNULL + && !ExprHasProperty(pExpr,EP_FromJoin) + && 0==sqlite3ExprCanBeNull(pLeft) + ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pExpr->op = TK_TRUEFALSE; pExpr->u.zToken = "false"; ExprSetProperty(pExpr, EP_IsFalse); @@ -148772,9 +151432,11 @@ static void exprAnalyze( ** BETWEEN term is skipped. */ else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){ - ExprList *pList = pExpr->x.pList; + ExprList *pList; int i; static const u8 ops[] = {TK_GE, TK_LE}; + assert( ExprUseXList(pExpr) ); + pList = pExpr->x.pList; assert( pList!=0 ); assert( pList->nExpr==2 ); for(i=0; i<2; i++){ @@ -148867,8 +151529,12 @@ static void exprAnalyze( const char *zCollSeqName; /* Name of collating sequence */ const u16 wtFlags = TERM_LIKEOPT | TERM_VIRTUAL | TERM_DYNAMIC; + assert( ExprUseXList(pExpr) ); pLeft = pExpr->x.pList->a[1].pExpr; pStr2 = sqlite3ExprDup(db, pStr1, 0); + assert( pStr1==0 || !ExprHasProperty(pStr1, EP_IntValue) ); + assert( pStr2==0 || !ExprHasProperty(pStr2, EP_IntValue) ); + /* Convert the lower bound to upper-case and the upper bound to ** lower-case (upper-case is less than lower-case in ASCII) so that @@ -148931,7 +151597,10 @@ static void exprAnalyze( ** no longer used. ** ** This is only required if at least one side of the comparison operation - ** is not a sub-select. */ + ** is not a sub-select. 
+ ** + ** tag-20220128a + */ if( (pExpr->op==TK_EQ || pExpr->op==TK_IS) && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 && sqlite3ExprVectorSize(pExpr->pRight)==nLeft @@ -148943,12 +151612,12 @@ static void exprAnalyze( for(i=0; ipLeft, i); - Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); + Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i, nLeft); + Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i, nLeft); pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); transferJoinMarkings(pNew, pExpr); - idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); + idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC|TERM_SLICE); exprAnalyze(pSrc, pWC, idxNew); } pTerm = &pWC->a[idxTerm]; @@ -148968,6 +151637,7 @@ static void exprAnalyze( else if( pExpr->op==TK_IN && pTerm->u.x.iField==0 && pExpr->pLeft->op==TK_VECTOR + && ALWAYS( ExprUseXSelect(pExpr) ) && pExpr->x.pSelect->pPrior==0 #ifndef SQLITE_OMIT_WINDOWFUNC && pExpr->x.pSelect->pWin==0 @@ -148977,7 +151647,7 @@ static void exprAnalyze( int i; for(i=0; ipLeft); i++){ int idxNew; - idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL); + idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL|TERM_SLICE); pWC->a[idxNew].u.x.iField = i+1; exprAnalyze(pSrc, pWC, idxNew); markTermAsChild(pWC, idxNew, idxTerm); @@ -149010,7 +151680,7 @@ static void exprAnalyze( 0, sqlite3ExprDup(db, pRight, 0)); if( ExprHasProperty(pExpr, EP_FromJoin) && pNewExpr ){ ExprSetProperty(pNewExpr, EP_FromJoin); - pNewExpr->iRightJoinTable = pExpr->iRightJoinTable; + pNewExpr->w.iRightJoinTable = pExpr->w.iRightJoinTable; } idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); @@ -149073,6 +151743,113 @@ SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){ } } +/* +** Add either a LIMIT (if eMatchOp==SQLITE_INDEX_CONSTRAINT_LIMIT) or +** OFFSET (if eMatchOp==SQLITE_INDEX_CONSTRAINT_OFFSET) term to the +** where-clause passed as the first argument. The value for the term +** is found in register iReg. +** +** In the common case where the value is a simple integer +** (example: "LIMIT 5 OFFSET 10") then the expression codes as a +** TK_INTEGER so that it will be available to sqlite3_vtab_rhs_value(). +** If not, then it codes as a TK_REGISTER expression. +*/ +static void whereAddLimitExpr( + WhereClause *pWC, /* Add the constraint to this WHERE clause */ + int iReg, /* Register that will hold value of the limit/offset */ + Expr *pExpr, /* Expression that defines the limit/offset */ + int iCsr, /* Cursor to which the constraint applies */ + int eMatchOp /* SQLITE_INDEX_CONSTRAINT_LIMIT or _OFFSET */ +){ + Parse *pParse = pWC->pWInfo->pParse; + sqlite3 *db = pParse->db; + Expr *pNew; + int iVal = 0; + + if( sqlite3ExprIsInteger(pExpr, &iVal) && iVal>=0 ){ + Expr *pVal = sqlite3Expr(db, TK_INTEGER, 0); + if( pVal==0 ) return; + ExprSetProperty(pVal, EP_IntValue); + pVal->u.iValue = iVal; + pNew = sqlite3PExpr(pParse, TK_MATCH, 0, pVal); + }else{ + Expr *pVal = sqlite3Expr(db, TK_REGISTER, 0); + if( pVal==0 ) return; + pVal->iTable = iReg; + pNew = sqlite3PExpr(pParse, TK_MATCH, 0, pVal); + } + if( pNew ){ + WhereTerm *pTerm; + int idx; + idx = whereClauseInsert(pWC, pNew, TERM_DYNAMIC|TERM_VIRTUAL); + pTerm = &pWC->a[idx]; + pTerm->leftCursor = iCsr; + pTerm->eOperator = WO_AUX; + pTerm->eMatchOp = eMatchOp; + } +} + +/* +** Possibly add terms corresponding to the LIMIT and OFFSET clauses of the +** SELECT statement passed as the second argument. 
These terms are only +** added if: +** +** 1. The SELECT statement has a LIMIT clause, and +** 2. The SELECT statement is not an aggregate or DISTINCT query, and +** 3. The SELECT statement has exactly one object in its from clause, and +** that object is a virtual table, and +** 4. There are no terms in the WHERE clause that will not be passed +** to the virtual table xBestIndex method. +** 5. The ORDER BY clause, if any, will be made available to the xBestIndex +** method. +** +** LIMIT and OFFSET terms are ignored by most of the planner code. They +** exist only so that they may be passed to the xBestIndex method of the +** single virtual table in the FROM clause of the SELECT. +*/ +SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ + assert( p==0 || (p->pGroupBy==0 && (p->selFlags & SF_Aggregate)==0) ); + if( (p && p->pLimit) /* 1 */ + && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ + && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ + ){ + ExprList *pOrderBy = p->pOrderBy; + int iCsr = p->pSrc->a[0].iCursor; + int ii; + + /* Check condition (4). Return early if it is not met. */ + for(ii=0; iinTerm; ii++){ + if( pWC->a[ii].wtFlags & TERM_CODED ){ + /* This term is a vector operation that has been decomposed into + ** other, subsequent terms. It can be ignored. See tag-20220128a */ + assert( pWC->a[ii].wtFlags & TERM_VIRTUAL ); + assert( pWC->a[ii].eOperator==0 ); + continue; + } + if( pWC->a[ii].leftCursor!=iCsr ) return; + } + + /* Check condition (5). Return early if it is not met. */ + if( pOrderBy ){ + for(ii=0; iinExpr; ii++){ + Expr *pExpr = pOrderBy->a[ii].pExpr; + if( pExpr->op!=TK_COLUMN ) return; + if( pExpr->iTable!=iCsr ) return; + if( pOrderBy->a[ii].sortFlags & KEYINFO_ORDER_BIGNULL ) return; + } + } + + /* All conditions are met. Add the terms to the where-clause object. */ + assert( p->pLimit->op==TK_LIMIT ); + whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, + iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); + if( p->iOffset>0 ){ + whereAddLimitExpr(pWC, p->iOffset, p->pLimit->pRight, + iCsr, SQLITE_INDEX_CONSTRAINT_OFFSET); + } + } +} + /* ** Initialize a preallocated WhereClause structure. */ @@ -149084,6 +151861,7 @@ SQLITE_PRIVATE void sqlite3WhereClauseInit( pWC->hasOr = 0; pWC->pOuter = 0; pWC->nTerm = 0; + pWC->nBase = 0; pWC->nSlot = ArraySize(pWC->aStatic); pWC->a = pWC->aStatic; } @@ -149094,17 +151872,34 @@ SQLITE_PRIVATE void sqlite3WhereClauseInit( ** sqlite3WhereClauseInit(). 
*/ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){ - int i; - WhereTerm *a; sqlite3 *db = pWC->pWInfo->pParse->db; - for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){ - if( a->wtFlags & TERM_DYNAMIC ){ - sqlite3ExprDelete(db, a->pExpr); + assert( pWC->nTerm>=pWC->nBase ); + if( pWC->nTerm>0 ){ + WhereTerm *a = pWC->a; + WhereTerm *aLast = &pWC->a[pWC->nTerm-1]; +#ifdef SQLITE_DEBUG + int i; + /* Verify that every term past pWC->nBase is virtual */ + for(i=pWC->nBase; inTerm; i++){ + assert( (pWC->a[i].wtFlags & TERM_VIRTUAL)!=0 ); } - if( a->wtFlags & TERM_ORINFO ){ - whereOrInfoDelete(db, a->u.pOrInfo); - }else if( a->wtFlags & TERM_ANDINFO ){ - whereAndInfoDelete(db, a->u.pAndInfo); +#endif + while(1){ + assert( a->eMatchOp==0 || a->eOperator==WO_AUX ); + if( a->wtFlags & TERM_DYNAMIC ){ + sqlite3ExprDelete(db, a->pExpr); + } + if( a->wtFlags & (TERM_ORINFO|TERM_ANDINFO) ){ + if( a->wtFlags & TERM_ORINFO ){ + assert( (a->wtFlags & TERM_ANDINFO)==0 ); + whereOrInfoDelete(db, a->u.pOrInfo); + }else{ + assert( (a->wtFlags & TERM_ANDINFO)!=0 ); + whereAndInfoDelete(db, a->u.pAndInfo); + } + } + if( a==aLast ) break; + a++; } } if( pWC->a!=pWC->aStatic ){ @@ -149117,28 +151912,52 @@ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){ ** These routines walk (recursively) an expression tree and generate ** a bitmask indicating which tables are used in that expression ** tree. +** +** sqlite3WhereExprUsage(MaskSet, Expr) -> +** +** Return a Bitmask of all tables referenced by Expr. Expr can be +** be NULL, in which case 0 is returned. +** +** sqlite3WhereExprUsageNN(MaskSet, Expr) -> +** +** Same as sqlite3WhereExprUsage() except that Expr must not be +** NULL. The "NN" suffix on the name stands for "Not Null". +** +** sqlite3WhereExprListUsage(MaskSet, ExprList) -> +** +** Return a Bitmask of all tables referenced by every expression +** in the expression list ExprList. ExprList can be NULL, in which +** case 0 is returned. +** +** sqlite3WhereExprUsageFull(MaskSet, ExprList) -> +** +** Internal use only. Called only by sqlite3WhereExprUsageNN() for +** complex expressions that require pushing register values onto +** the stack. Many calls to sqlite3WhereExprUsageNN() do not need +** the more complex analysis done by this routine. Hence, the +** computations done by this routine are broken out into a separate +** "no-inline" function to avoid the stack push overhead in the +** common case where it is not needed. */ -SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){ +static SQLITE_NOINLINE Bitmask sqlite3WhereExprUsageFull( + WhereMaskSet *pMaskSet, + Expr *p +){ Bitmask mask; - if( p->op==TK_COLUMN && !ExprHasProperty(p, EP_FixedCol) ){ - return sqlite3WhereGetMask(pMaskSet, p->iTable); - }else if( ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ - assert( p->op!=TK_IF_NULL_ROW ); - return 0; - } mask = (p->op==TK_IF_NULL_ROW) ? 
sqlite3WhereGetMask(pMaskSet, p->iTable) : 0; if( p->pLeft ) mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pLeft); if( p->pRight ){ mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pRight); assert( p->x.pList==0 ); - }else if( ExprHasProperty(p, EP_xIsSelect) ){ + }else if( ExprUseXSelect(p) ){ if( ExprHasProperty(p, EP_VarSelect) ) pMaskSet->bVarSelect = 1; mask |= exprSelectUsage(pMaskSet, p->x.pSelect); }else if( p->x.pList ){ mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList); } #ifndef SQLITE_OMIT_WINDOWFUNC - if( (p->op==TK_FUNCTION || p->op==TK_AGG_FUNCTION) && p->y.pWin ){ + if( (p->op==TK_FUNCTION || p->op==TK_AGG_FUNCTION) && ExprUseYWin(p) ){ + assert( p->y.pWin!=0 ); mask |= sqlite3WhereExprListUsage(pMaskSet, p->y.pWin->pPartition); mask |= sqlite3WhereExprListUsage(pMaskSet, p->y.pWin->pOrderBy); mask |= sqlite3WhereExprUsage(pMaskSet, p->y.pWin->pFilter); @@ -149146,6 +151965,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){ #endif return mask; } +SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){ + if( p->op==TK_COLUMN && !ExprHasProperty(p, EP_FixedCol) ){ + return sqlite3WhereGetMask(pMaskSet, p->iTable); + }else if( ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ + assert( p->op!=TK_IF_NULL_ROW ); + return 0; + } + return sqlite3WhereExprUsageFull(pMaskSet, p); +} SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){ return p ? sqlite3WhereExprUsageNN(pMaskSet,p) : 0; } @@ -149213,7 +152041,9 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( if( pColRef==0 ) return; pColRef->iTable = pItem->iCursor; pColRef->iColumn = k++; + assert( ExprUseYTab(pColRef) ); pColRef->y.pTab = pTab; + pItem->colUsed |= sqlite3ExprColUsed(pColRef); pRhs = sqlite3PExpr(pParse, TK_UPLUS, sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0); pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs); @@ -149258,8 +152088,14 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( */ typedef struct HiddenIndexInfo HiddenIndexInfo; struct HiddenIndexInfo { - WhereClause *pWC; /* The Where clause being analyzed */ - Parse *pParse; /* The parsing context */ + WhereClause *pWC; /* The Where clause being analyzed */ + Parse *pParse; /* The parsing context */ + int eDistinct; /* Value to return from sqlite3_vtab_distinct() */ + u32 mIn; /* Mask of terms that are IN (...) */ + u32 mHandleIn; /* Terms that vtab will handle as IN (...) */ + sqlite3_value *aRhs[1]; /* RHS values for constraints. 
MUST BE LAST + ** because extra space is allocated to hold up + ** to nTerm such values */ }; /* Forward declaration of methods */ @@ -149462,7 +152298,12 @@ static int whereOrInsert( SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){ int i; assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 ); - for(i=0; in; i++){ + assert( pMaskSet->n>0 || pMaskSet->ix[0]<0 ); + assert( iCursor>=-1 ); + if( pMaskSet->ix[0]==iCursor ){ + return 1; + } + for(i=1; in; i++){ if( pMaskSet->ix[i]==iCursor ){ return MASKBIT(i); } @@ -149514,8 +152355,10 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ iColumn = pScan->aiColumn[pScan->iEquiv-1]; iCur = pScan->aiCur[pScan->iEquiv-1]; assert( pWC!=0 ); + assert( iCur>=0 ); do{ for(pTerm=pWC->a+k; knTerm; k++, pTerm++){ + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 || pTerm->leftCursor<0 ); if( pTerm->leftCursor==iCur && pTerm->u.x.leftColumn==iColumn && (iColumn!=XN_EXPR @@ -149557,7 +152400,8 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ } } if( (pTerm->eOperator & (WO_EQ|WO_IS))!=0 - && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN + && (pX = pTerm->pExpr->pRight, ALWAYS(pX!=0)) + && pX->op==TK_COLUMN && pX->iTable==pScan->aiCur[0] && pX->iColumn==pScan->aiColumn[0] ){ @@ -149644,16 +152488,16 @@ static WhereTerm *whereScanInit( if( pIdx ){ int j = iColumn; iColumn = pIdx->aiColumn[j]; - if( iColumn==XN_EXPR ){ - pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr; - pScan->zCollName = pIdx->azColl[j]; - pScan->aiColumn[0] = XN_EXPR; - return whereScanInitIndexExpr(pScan); - }else if( iColumn==pIdx->pTable->iPKey ){ + if( iColumn==pIdx->pTable->iPKey ){ iColumn = XN_ROWID; }else if( iColumn>=0 ){ pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity; pScan->zCollName = pIdx->azColl[j]; + }else if( iColumn==XN_EXPR ){ + pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr; + pScan->zCollName = pIdx->azColl[j]; + pScan->aiColumn[0] = XN_EXPR; + return whereScanInitIndexExpr(pScan); } }else if( iColumn==XN_EXPR ){ return 0; @@ -149896,12 +152740,14 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ int i; if( !sqlite3WhereTrace ) return; for(i=0; inConstraint; i++){ - sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usabled=%d\n", + sqlite3DebugPrintf( + " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n", i, p->aConstraint[i].iColumn, p->aConstraint[i].iTermOffset, p->aConstraint[i].op, - p->aConstraint[i].usable); + p->aConstraint[i].usable, + sqlite3_vtab_collation(p,i)); } for(i=0; inOrderBy; i++){ sqlite3DebugPrintf(" orderby[%d]: col=%d desc=%d\n", @@ -149937,9 +152783,9 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ ** index existed. 
*/ static int termCanDriveIndex( - WhereTerm *pTerm, /* WHERE clause term to check */ - SrcItem *pSrc, /* Table we are trying to access */ - Bitmask notReady /* Tables in outer loops of the join */ + const WhereTerm *pTerm, /* WHERE clause term to check */ + const SrcItem *pSrc, /* Table we are trying to access */ + const Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; if( pTerm->leftCursor!=pSrc->iCursor ) return 0; @@ -149954,6 +152800,7 @@ static int termCanDriveIndex( return 0; } if( (pTerm->prereqRight & notReady)!=0 ) return 0; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); if( pTerm->u.x.leftColumn<0 ) return 0; aff = pSrc->pTab->aCol[pTerm->u.x.leftColumn].affinity; if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; @@ -149969,11 +152816,11 @@ static int termCanDriveIndex( ** and to set up the WhereLevel object pLevel so that the code generator ** makes use of the automatic index. */ -static void constructAutomaticIndex( +static SQLITE_NOINLINE void constructAutomaticIndex( Parse *pParse, /* The parsing context */ - WhereClause *pWC, /* The WHERE clause */ - SrcItem *pSrc, /* The FROM clause term to get the next index */ - Bitmask notReady, /* Mask of cursors that are not available */ + const WhereClause *pWC, /* The WHERE clause */ + const SrcItem *pSrc, /* The FROM clause term to get the next index */ + const Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ int nKeyCol; /* Number of columns in the constructed index */ @@ -150015,25 +152862,27 @@ static void constructAutomaticIndex( idxCols = 0; for(pTerm=pWC->a; pTermpExpr; - assert( !ExprHasProperty(pExpr, EP_FromJoin) /* prereq always non-zero */ - || pExpr->iRightJoinTable!=pSrc->iCursor /* for the right-hand */ - || pLoop->prereq!=0 ); /* table of a LEFT JOIN */ - if( pLoop->prereq==0 - && (pTerm->wtFlags & TERM_VIRTUAL)==0 - && !ExprHasProperty(pExpr, EP_FromJoin) - && sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor) ){ + /* Make the automatic index a partial index if there are terms in the + ** WHERE clause (or the ON clause of a LEFT join) that constrain which + ** rows of the target table (pSrc) that can be used. */ + if( (pTerm->wtFlags & TERM_VIRTUAL)==0 + && sqlite3ExprIsTableConstraint(pExpr, pSrc) + ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); } if( termCanDriveIndex(pTerm, pSrc, notReady) ){ - int iCol = pTerm->u.x.leftColumn; - Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); + int iCol; + Bitmask cMask; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + iCol = pTerm->u.x.leftColumn; + cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS ); testcase( iCol==BMS-1 ); if( !sentWarning ){ sqlite3_log(SQLITE_WARNING_AUTOINDEX, "automatic index on %s(%s)", pTable->zName, - pTable->aCol[iCol].zName); + pTable->aCol[iCol].zCnName); sentWarning = 1; } if( (idxCols & cMask)==0 ){ @@ -150079,8 +152928,11 @@ static void constructAutomaticIndex( idxCols = 0; for(pTerm=pWC->a; pTermu.x.leftColumn; - Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); + int iCol; + Bitmask cMask; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + iCol = pTerm->u.x.leftColumn; + cMask = iCol>=BMS ? 
MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS-1 ); testcase( iCol==BMS ); if( (idxCols & cMask)==0 ){ @@ -150122,6 +152974,10 @@ static void constructAutomaticIndex( sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "for %s", pTable->zName)); + if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + pLevel->regFilter = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Blob, 10000, pLevel->regFilter); + } /* Fill the automatic index with content */ pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom]; @@ -150144,6 +153000,10 @@ static void constructAutomaticIndex( regBase = sqlite3GenerateIndexKey( pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0 ); + if( pLevel->regFilter ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, + regBase, pLoop->u.btree.nEq); + } sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); @@ -150170,22 +153030,149 @@ static void constructAutomaticIndex( } #endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ +/* +** Generate bytecode that will initialize a Bloom filter that is appropriate +** for pLevel. +** +** If there are inner loops within pLevel that have the WHERE_BLOOMFILTER +** flag set, initialize a Bloomfilter for them as well. Except don't do +** this recursive initialization if the SQLITE_BloomPulldown optimization has +** been turned off. +** +** When the Bloom filter is initialized, the WHERE_BLOOMFILTER flag is cleared +** from the loop, but the regFilter value is set to a register that implements +** the Bloom filter. When regFilter is positive, the +** sqlite3WhereCodeOneLoopStart() will generate code to test the Bloom filter +** and skip the subsequence B-Tree seek if the Bloom filter indicates that +** no matching rows exist. +** +** This routine may only be called if it has previously been determined that +** the loop would benefit from a Bloom filter, and the WHERE_BLOOMFILTER bit +** is set. +*/ +static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( + WhereInfo *pWInfo, /* The WHERE clause */ + int iLevel, /* Index in pWInfo->a[] that is pLevel */ + WhereLevel *pLevel, /* Make a Bloom filter for this FROM term */ + Bitmask notReady /* Loops that are not ready */ +){ + int addrOnce; /* Address of opening OP_Once */ + int addrTop; /* Address of OP_Rewind */ + int addrCont; /* Jump here to skip a row */ + const WhereTerm *pTerm; /* For looping over WHERE clause terms */ + const WhereTerm *pWCEnd; /* Last WHERE clause term */ + Parse *pParse = pWInfo->pParse; /* Parsing context */ + Vdbe *v = pParse->pVdbe; /* VDBE under construction */ + WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */ + int iCur; /* Cursor for table getting the filter */ + + assert( pLoop!=0 ); + assert( v!=0 ); + assert( pLoop->wsFlags & WHERE_BLOOMFILTER ); + + addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); + do{ + const SrcItem *pItem; + const Table *pTab; + u64 sz; + sqlite3WhereExplainBloomFilter(pParse, pWInfo, pLevel); + addrCont = sqlite3VdbeMakeLabel(pParse); + iCur = pLevel->iTabCur; + pLevel->regFilter = ++pParse->nMem; + + /* The Bloom filter is a Blob held in a register. Initialize it + ** to zero-filled blob of at least 80K bits, but maybe more if the + ** estimated size of the table is larger. We could actually + ** measure the size of the table at run-time using OP_Count with + ** P3==1 and use that value to initialize the blob. 
But that makes + ** testing complicated. By basing the blob size on the value in the + ** sqlite_stat1 table, testing is much easier. + */ + pItem = &pWInfo->pTabList->a[pLevel->iFrom]; + assert( pItem!=0 ); + pTab = pItem->pTab; + assert( pTab!=0 ); + sz = sqlite3LogEstToInt(pTab->nRowLogEst); + if( sz<10000 ){ + sz = 10000; + }else if( sz>10000000 ){ + sz = 10000000; + } + sqlite3VdbeAddOp2(v, OP_Blob, (int)sz, pLevel->regFilter); + + addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v); + pWCEnd = &pWInfo->sWC.a[pWInfo->sWC.nTerm]; + for(pTerm=pWInfo->sWC.a; pTermpExpr; + if( (pTerm->wtFlags & TERM_VIRTUAL)==0 + && sqlite3ExprIsTableConstraint(pExpr, pItem) + ){ + sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); + } + } + if( pLoop->wsFlags & WHERE_IPK ){ + int r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, r1); + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, 1); + sqlite3ReleaseTempReg(pParse, r1); + }else{ + Index *pIdx = pLoop->u.btree.pIndex; + int n = pLoop->u.btree.nEq; + int r1 = sqlite3GetTempRange(pParse, n); + int jj; + for(jj=0; jjaiColumn[jj]; + assert( pIdx->pTable==pItem->pTab ); + sqlite3ExprCodeGetColumnOfTable(v, pIdx->pTable, iCur, iCol,r1+jj); + } + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n); + sqlite3ReleaseTempRange(pParse, r1, n); + } + sqlite3VdbeResolveLabel(v, addrCont); + sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); + VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrTop); + pLoop->wsFlags &= ~WHERE_BLOOMFILTER; + if( OptimizationDisabled(pParse->db, SQLITE_BloomPulldown) ) break; + while( ++iLevel < pWInfo->nLevel ){ + const SrcItem *pTabItem; + pLevel = &pWInfo->a[iLevel]; + pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; + if( pTabItem->fg.jointype & JT_LEFT ) continue; + pLoop = pLevel->pWLoop; + if( NEVER(pLoop==0) ) continue; + if( pLoop->prereq & notReady ) continue; + if( (pLoop->wsFlags & (WHERE_BLOOMFILTER|WHERE_COLUMN_IN)) + ==WHERE_BLOOMFILTER + ){ + /* This is a candidate for bloom-filter pull-down (early evaluation). + ** The test that WHERE_COLUMN_IN is omitted is important, as we are + ** not able to do early evaluation of bloom filters that make use of + ** the IN operator */ + break; + } + } + }while( iLevel < pWInfo->nLevel ); + sqlite3VdbeJumpHere(v, addrOnce); +} + + #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Allocate and populate an sqlite3_index_info structure. It is the ** responsibility of the caller to eventually release the structure -** by passing the pointer returned by this function to sqlite3_free(). +** by passing the pointer returned by this function to freeIndexInfo(). 
*/ static sqlite3_index_info *allocateIndexInfo( - Parse *pParse, /* The parsing context */ + WhereInfo *pWInfo, /* The WHERE clause */ WhereClause *pWC, /* The WHERE clause being analyzed */ Bitmask mUnusable, /* Ignore terms with these prereqs */ SrcItem *pSrc, /* The FROM clause term that is the vtab */ - ExprList *pOrderBy, /* The ORDER BY clause */ u16 *pmNoOmit /* Mask of terms not to omit */ ){ int i, j; int nTerm; + Parse *pParse = pWInfo->pParse; struct sqlite3_index_constraint *pIdxCons; struct sqlite3_index_orderby *pIdxOrderBy; struct sqlite3_index_constraint_usage *pUsage; @@ -150194,10 +153181,21 @@ static sqlite3_index_info *allocateIndexInfo( int nOrderBy; sqlite3_index_info *pIdxInfo; u16 mNoOmit = 0; + const Table *pTab; + int eDistinct = 0; + ExprList *pOrderBy = pWInfo->pOrderBy; - /* Count the number of possible WHERE clause constraints referring - ** to this virtual table */ + assert( pSrc!=0 ); + pTab = pSrc->pTab; + assert( pTab!=0 ); + assert( IsVirtual(pTab) ); + + /* Find all WHERE clause constraints referring to this virtual table. + ** Mark each term with the TERM_OK flag. Set nTerm to the number of + ** terms found. + */ for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ + pTerm->wtFlags &= ~TERM_OK; if( pTerm->leftCursor != pSrc->iCursor ) continue; if( pTerm->prereqRight & mUnusable ) continue; assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); @@ -150207,8 +153205,21 @@ static sqlite3_index_info *allocateIndexInfo( testcase( pTerm->eOperator & WO_ALL ); if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; if( pTerm->wtFlags & TERM_VNULL ) continue; - assert( pTerm->u.x.leftColumn>=(-1) ); + + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + assert( pTerm->u.x.leftColumn>=XN_ROWID ); + assert( pTerm->u.x.leftColumnnCol ); + + /* tag-20191211-002: WHERE-clause constraints are not useful to the + ** right-hand table of a LEFT JOIN. See tag-20191211-001 for the + ** equivalent restriction for ordinary tables. */ + if( (pSrc->fg.jointype & JT_LEFT)!=0 + && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) + ){ + continue; + } nTerm++; + pTerm->wtFlags |= TERM_OK; } /* If the ORDER BY clause contains only columns in the current @@ -150220,11 +153231,47 @@ static sqlite3_index_info *allocateIndexInfo( int n = pOrderBy->nExpr; for(i=0; ia[i].pExpr; - if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break; + Expr *pE2; + + /* Skip over constant terms in the ORDER BY clause */ + if( sqlite3ExprIsConstant(pExpr) ){ + continue; + } + + /* Virtual tables are unable to deal with NULLS FIRST */ if( pOrderBy->a[i].sortFlags & KEYINFO_ORDER_BIGNULL ) break; + + /* First case - a direct column references without a COLLATE operator */ + if( pExpr->op==TK_COLUMN && pExpr->iTable==pSrc->iCursor ){ + assert( pExpr->iColumn>=XN_ROWID && pExpr->iColumnnCol ); + continue; + } + + /* 2nd case - a column reference with a COLLATE operator. Only match + ** of the COLLATE operator matches the collation of the column. 
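**
** (Illustrative example with a hypothetical column, not taken from this
** file: "ORDER BY name COLLATE nocase" is only passed through to the
** virtual table here if column "name" was itself declared with
** COLLATE NOCASE; for the rowid the collating sequence never matters.)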
*/ + if( pExpr->op==TK_COLLATE + && (pE2 = pExpr->pLeft)->op==TK_COLUMN + && pE2->iTable==pSrc->iCursor + ){ + const char *zColl; /* The collating sequence name */ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + assert( pExpr->u.zToken!=0 ); + assert( pE2->iColumn>=XN_ROWID && pE2->iColumnnCol ); + pExpr->iColumn = pE2->iColumn; + if( pE2->iColumn<0 ) continue; /* Collseq does not matter for rowid */ + zColl = sqlite3ColumnColl(&pTab->aCol[pE2->iColumn]); + if( zColl==0 ) zColl = sqlite3StrBINARY; + if( sqlite3_stricmp(pExpr->u.zToken, zColl)==0 ) continue; + } + + /* No matches cause a break out of the loop */ + break; } - if( i==n){ + if( i==n ){ nOrderBy = n; + if( (pWInfo->wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY)) ){ + eDistinct = 1 + ((pWInfo->wctrlFlags & WHERE_DISTINCTBY)!=0); + } } } @@ -150232,46 +153279,35 @@ static sqlite3_index_info *allocateIndexInfo( */ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) ); + + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) + + sizeof(sqlite3_value*)*nTerm ); if( pIdxInfo==0 ){ sqlite3ErrorMsg(pParse, "out of memory"); return 0; } pHidden = (struct HiddenIndexInfo*)&pIdxInfo[1]; - pIdxCons = (struct sqlite3_index_constraint*)&pHidden[1]; + pIdxCons = (struct sqlite3_index_constraint*)&pHidden->aRhs[nTerm]; pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; - pIdxInfo->nOrderBy = nOrderBy; pIdxInfo->aConstraint = pIdxCons; pIdxInfo->aOrderBy = pIdxOrderBy; pIdxInfo->aConstraintUsage = pUsage; pHidden->pWC = pWC; pHidden->pParse = pParse; + pHidden->eDistinct = eDistinct; + pHidden->mIn = 0; for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ u16 op; - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->prereqRight & mUnusable ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_IS ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; - - /* tag-20191211-002: WHERE-clause constraints are not useful to the - ** right-hand table of a LEFT JOIN. See tag-20191211-001 for the - ** equivalent restriction for ordinary tables. 
*/ - if( (pSrc->fg.jointype & JT_LEFT)!=0 - && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) - ){ - continue; - } - assert( pTerm->u.x.leftColumn>=(-1) ); + if( (pTerm->wtFlags & TERM_OK)==0 ) continue; pIdxCons[j].iColumn = pTerm->u.x.leftColumn; pIdxCons[j].iTermOffset = i; op = pTerm->eOperator & WO_ALL; - if( op==WO_IN ) op = WO_EQ; + if( op==WO_IN ){ + if( (pTerm->wtFlags & TERM_SLICE)==0 ){ + pHidden->mIn |= SMASKBIT32(j); + } + op = WO_EQ; + } if( op==WO_AUX ){ pIdxCons[j].op = pTerm->eMatchOp; }else if( op & (WO_ISNULL|WO_IS) ){ @@ -150304,17 +153340,42 @@ static sqlite3_index_info *allocateIndexInfo( j++; } + assert( j==nTerm ); pIdxInfo->nConstraint = j; - for(i=0; ia[i].pExpr; - pIdxOrderBy[i].iColumn = pExpr->iColumn; - pIdxOrderBy[i].desc = pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC; + if( sqlite3ExprIsConstant(pExpr) ) continue; + assert( pExpr->op==TK_COLUMN + || (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN + && pExpr->iColumn==pExpr->pLeft->iColumn) ); + pIdxOrderBy[j].iColumn = pExpr->iColumn; + pIdxOrderBy[j].desc = pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC; + j++; } + pIdxInfo->nOrderBy = j; *pmNoOmit = mNoOmit; return pIdxInfo; } +/* +** Free an sqlite3_index_info structure allocated by allocateIndexInfo() +** and possibly modified by xBestIndex methods. +*/ +static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ + HiddenIndexInfo *pHidden; + int i; + assert( pIdxInfo!=0 ); + pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; + assert( pHidden->pParse!=0 ); + assert( pHidden->pParse->db==db ); + for(i=0; inConstraint; i++){ + sqlite3ValueFree(pHidden->aRhs[i]); /* IMP: R-14553-25174 */ + pHidden->aRhs[i] = 0; + } + sqlite3DbFree(db, pIdxInfo); +} + /* ** The table object reference passed as the second argument to this function ** must represent a virtual table. 
This function invokes the xBestIndex() @@ -150336,7 +153397,9 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ int rc; whereTraceIndexInfoInputs(p); + pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); + pParse->db->nSchemaLock--; whereTraceIndexInfoOutputs(p); if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){ @@ -151030,6 +154093,7 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L'; if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C'; if( pTerm->eOperator & WO_SINGLE ){ + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}", pTerm->leftCursor, pTerm->u.x.leftColumn); }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){ @@ -151047,7 +154111,7 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3DebugPrintf(" prob=%-3d prereq=%llx,%llx", pTerm->truthProb, (u64)pTerm->prereqAll, (u64)pTerm->prereqRight); } - if( pTerm->u.x.iField ){ + if( (pTerm->eOperator & (WO_OR|WO_AND))==0 && pTerm->u.x.iField ){ sqlite3DebugPrintf(" iField=%d", pTerm->u.x.iField); } if( pTerm->iParent>=0 ){ @@ -151109,9 +154173,9 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ sqlite3_free(z); } if( p->wsFlags & WHERE_SKIPSCAN ){ - sqlite3DebugPrintf(" f %05x %d-%d", p->wsFlags, p->nLTerm,p->nSkip); + sqlite3DebugPrintf(" f %06x %d-%d", p->wsFlags, p->nLTerm,p->nSkip); }else{ - sqlite3DebugPrintf(" f %05x N %d", p->wsFlags, p->nLTerm); + sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ @@ -151211,7 +154275,8 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ assert( pWInfo!=0 ); for(i=0; inLevel; i++){ WhereLevel *pLevel = &pWInfo->a[i]; - if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE) ){ + if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE)!=0 ){ + assert( (pLevel->pWLoop->wsFlags & WHERE_MULTI_OR)==0 ); sqlite3DbFree(db, pLevel->u.in.aInLoop); } } @@ -151239,7 +154304,8 @@ static void whereUndoExprMods(WhereInfo *pWInfo){ /* ** Return TRUE if all of the following are true: ** -** (1) X has the same or lower cost that Y +** (1) X has the same or lower cost, or returns the same or fewer rows, +** than Y. 
** (2) X uses fewer WHERE clause terms than Y ** (3) Every WHERE clause term used by X is also used by Y ** (4) X skips at least as many columns as Y @@ -151262,11 +154328,8 @@ static int whereLoopCheaperProperSubset( if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){ return 0; /* X is not a subset of Y */ } + if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; if( pY->nSkip > pX->nSkip ) return 0; - if( pX->rRun >= pY->rRun ){ - if( pX->rRun > pY->rRun ) return 0; /* X costs more than Y */ - if( pX->nOut > pY->nOut ) return 0; /* X costs more than Y */ - } for(i=pX->nLTerm-1; i>=0; i--){ if( pX->aLTerm[i]==0 ) continue; for(j=pY->nLTerm-1; j>=0; j--){ @@ -151282,8 +154345,8 @@ static int whereLoopCheaperProperSubset( } /* -** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so -** that: +** Try to adjust the cost and number of output rows of WhereLoop pTemplate +** upwards or downwards so that: ** ** (1) pTemplate costs less than any other WhereLoops that are a proper ** subset of pTemplate @@ -151304,16 +154367,20 @@ static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){ /* Adjust pTemplate cost downward so that it is cheaper than its ** subset p. */ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n", - pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut-1)); - pTemplate->rRun = p->rRun; - pTemplate->nOut = p->nOut - 1; + pTemplate->rRun, pTemplate->nOut, + MIN(p->rRun, pTemplate->rRun), + MIN(p->nOut - 1, pTemplate->nOut))); + pTemplate->rRun = MIN(p->rRun, pTemplate->rRun); + pTemplate->nOut = MIN(p->nOut - 1, pTemplate->nOut); }else if( whereLoopCheaperProperSubset(pTemplate, p) ){ /* Adjust pTemplate cost upward so that it is costlier than p since ** pTemplate is a proper subset of p */ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n", - pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut+1)); - pTemplate->rRun = p->rRun; - pTemplate->nOut = p->nOut + 1; + pTemplate->rRun, pTemplate->nOut, + MAX(p->rRun, pTemplate->rRun), + MAX(p->nOut + 1, pTemplate->nOut))); + pTemplate->rRun = MAX(p->rRun, pTemplate->rRun); + pTemplate->nOut = MAX(p->nOut + 1, pTemplate->nOut); } } } @@ -151568,11 +154635,11 @@ static void whereLoopOutputAdjust( LogEst iReduce = 0; /* pLoop->nOut should not exceed nRow-iReduce */ assert( (pLoop->wsFlags & WHERE_AUTO_INDEX)==0 ); - for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){ + for(i=pWC->nBase, pTerm=pWC->a; i>0; i--, pTerm++){ assert( pTerm!=0 ); - if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break; - if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue; if( (pTerm->prereqAll & notAllowed)!=0 ) continue; + if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue; + if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) continue; for(j=pLoop->nLTerm-1; j>=0; j--){ pX = pLoop->aLTerm[j]; if( pX==0 ) continue; @@ -151580,6 +154647,22 @@ static void whereLoopOutputAdjust( if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break; } if( j<0 ){ + if( pLoop->maskSelf==pTerm->prereqAll ){ + /* If there are extra terms in the WHERE clause not used by an index + ** that depend only on the table being scanned, and that will tend to + ** cause many rows to be omitted, then mark that table as + ** "self-culling". + ** + ** 2022-03-24: Self-culling only applies if either the extra terms + ** are straight comparison operators that are non-true with NULL + ** operand, or if the loop is not a LEFT JOIN. 
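**
** (Illustrative example with hypothetical tables, not taken from this
** file: in
**
**        SELECT * FROM t1 JOIN t2 ON t1.id=t2.t1id WHERE t2.flag=1
**
** the term "t2.flag=1" depends only on t2 and is not used by the lookup
** on t2.t1id, so under the rule above the loop over t2 is marked as
** self-culling.)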
+ */ + if( (pTerm->eOperator & 0x3f)!=0 + || (pWC->pWInfo->pTabList->a[pLoop->iTab].fg.jointype & JT_LEFT)==0 + ){ + pLoop->wsFlags |= WHERE_SELFCULL; + } + } if( pTerm->truthProb<=0 ){ /* If a truth probability is specified using the likelihood() hints, ** then use the probability provided by the application. */ @@ -151607,7 +154690,9 @@ static void whereLoopOutputAdjust( } } } - if( pLoop->nOut > nRow-iReduce ) pLoop->nOut = nRow - iReduce; + if( pLoop->nOut > nRow-iReduce ){ + pLoop->nOut = nRow - iReduce; + } } /* @@ -151644,9 +154729,12 @@ static int whereRangeVectorLen( char aff; /* Comparison affinity */ char idxaff = 0; /* Indexed columns affinity */ CollSeq *pColl; /* Comparison collation sequence */ - Expr *pLhs = pTerm->pExpr->pLeft->x.pList->a[i].pExpr; - Expr *pRhs = pTerm->pExpr->pRight; - if( pRhs->flags & EP_xIsSelect ){ + Expr *pLhs, *pRhs; + + assert( ExprUseXList(pTerm->pExpr->pLeft) ); + pLhs = pTerm->pExpr->pLeft->x.pList->a[i].pExpr; + pRhs = pTerm->pExpr->pRight; + if( ExprUseXSelect(pRhs) ){ pRhs = pRhs->x.pSelect->pEList->a[i].pExpr; }else{ pRhs = pRhs->x.pList->a[i].pExpr; @@ -151807,7 +154895,7 @@ static int whereLoopAddBtreeIndex( if( eOp & WO_IN ){ Expr *pExpr = pTerm->pExpr; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( ExprUseXSelect(pExpr) ){ /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ int i; nIn = 46; assert( 46==sqlite3LogEst(25) ); @@ -151948,7 +155036,7 @@ static int whereLoopAddBtreeIndex( if( nInMul==0 && pProbe->nSample && ALWAYS(pNew->u.btree.nEq<=pProbe->nSampleCol) - && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect)) + && ((eOp & WO_IN)==0 || ExprUseXList(pTerm->pExpr)) && OptimizationEnabled(db, SQLITE_Stat4) ){ Expr *pExpr = pTerm->pExpr; @@ -152149,7 +155237,7 @@ static int whereUsablePartialIndex( for(i=0, pTerm=pWC->a; inTerm; i++, pTerm++){ Expr *pExpr; pExpr = pTerm->pExpr; - if( (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab) + if( (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->w.iRightJoinTable==iTab) && (isLeft==0 || ExprHasProperty(pExpr, EP_FromJoin)) && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab) && (pTerm->wtFlags & TERM_VNULL)==0 @@ -152212,7 +155300,6 @@ static int whereLoopAddBtree( int iSortIdx = 1; /* Index number */ int b; /* A boolean value */ LogEst rSize; /* number of rows in the table */ - LogEst rLogSize; /* Logarithm of the number of rows in the table */ WhereClause *pWC; /* The parsed WHERE clause */ Table *pTab; /* Table being queried */ @@ -152225,6 +155312,7 @@ static int whereLoopAddBtree( assert( !IsVirtual(pSrc->pTab) ); if( pSrc->fg.isIndexedBy ){ + assert( pSrc->fg.isCte==0 ); /* An INDEXED BY clause specifies a particular index to use */ pProbe = pSrc->u2.pIBIndex; }else if( !HasRowid(pTab) ){ @@ -152255,7 +155343,6 @@ static int whereLoopAddBtree( pProbe = &sPk; } rSize = pTab->nRowLogEst; - rLogSize = estLog(rSize); #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* Automatic indexes */ @@ -152269,8 +155356,10 @@ static int whereLoopAddBtree( && !pSrc->fg.isRecursive /* Not a recursive common table expression. 
*/ ){ /* Generate auto-index WhereLoops */ + LogEst rLogSize; /* Logarithm of the number of rows in the table */ WhereTerm *pTerm; WhereTerm *pWCEnd = pWC->a + pWC->nTerm; + rLogSize = estLog(rSize); for(pTerm=pWC->a; rc==SQLITE_OK && pTermprereqRight & pNew->maskSelf ) continue; if( termCanDriveIndex(pTerm, pSrc, 0) ){ @@ -152288,7 +155377,7 @@ static int whereLoopAddBtree( ** those objects, since there is no opportunity to add schema ** indexes on subqueries and views. */ pNew->rSetup = rLogSize + rSize; - if( pTab->pSelect==0 && (pTab->tabFlags & TF_Ephemeral)==0 ){ + if( !IsView(pTab) && (pTab->tabFlags & TF_Ephemeral)==0 ){ pNew->rSetup += 28; }else{ pNew->rSetup -= 10; @@ -152450,6 +155539,15 @@ static int whereLoopAddBtree( #ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Return true if pTerm is a virtual table LIMIT or OFFSET term. +*/ +static int isLimitTerm(WhereTerm *pTerm){ + assert( pTerm->eOperator==WO_AUX || pTerm->eMatchOp==0 ); + return pTerm->eMatchOp>=SQLITE_INDEX_CONSTRAINT_LIMIT + && pTerm->eMatchOp<=SQLITE_INDEX_CONSTRAINT_OFFSET; +} + /* ** Argument pIdxInfo is already populated with all constraints that may ** be used by the virtual table identified by pBuilder->pNew->iTab. This @@ -152477,9 +155575,11 @@ static int whereLoopAddVirtualOne( u16 mExclude, /* Exclude terms using these operators */ sqlite3_index_info *pIdxInfo, /* Populated object for xBestIndex */ u16 mNoOmit, /* Do not omit these constraints */ - int *pbIn /* OUT: True if plan uses an IN(...) op */ + int *pbIn, /* OUT: True if plan uses an IN(...) op */ + int *pbRetryLimit /* OUT: Retry without LIMIT/OFFSET */ ){ WhereClause *pWC = pBuilder->pWC; + HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; struct sqlite3_index_constraint *pIdxCons; struct sqlite3_index_constraint_usage *pUsage = pIdxInfo->aConstraintUsage; int i; @@ -152502,6 +155602,7 @@ static int whereLoopAddVirtualOne( pIdxCons->usable = 0; if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight && (pTerm->eOperator & mExclude)==0 + && (pbRetryLimit || !isLimitTerm(pTerm)) ){ pIdxCons->usable = 1; } @@ -152517,6 +155618,7 @@ static int whereLoopAddVirtualOne( pIdxInfo->estimatedRows = 25; pIdxInfo->idxFlags = 0; pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; + pHidden->mHandleIn = 0; /* Invoke the virtual table xBestIndex() method */ rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo); @@ -152534,8 +155636,8 @@ static int whereLoopAddVirtualOne( mxTerm = -1; assert( pNew->nLSlot>=nConstraint ); - for(i=0; iaLTerm[i] = 0; - pNew->u.vtab.omitMask = 0; + memset(pNew->aLTerm, 0, sizeof(pNew->aLTerm[0])*nConstraint ); + memset(&pNew->u.vtab, 0, sizeof(pNew->u.vtab)); pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; for(i=0; ieMatchOp==SQLITE_INDEX_CONSTRAINT_OFFSET ){ + pNew->u.vtab.bOmitOffset = 1; + } } - if( (pTerm->eOperator & WO_IN)!=0 ){ + if( SMASKBIT32(i) & pHidden->mHandleIn ){ + pNew->u.vtab.mHandleIn |= MASKBIT32(iTerm); + }else if( (pTerm->eOperator & WO_IN)!=0 ){ /* A virtual table that is constrained by an IN clause may not ** consume the ORDER BY clause because (1) the order of IN terms ** is not necessarily related to the order of output terms and @@ -152580,6 +155687,21 @@ static int whereLoopAddVirtualOne( pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE; *pbIn = 1; assert( (mExclude & WO_IN)==0 ); } + + if( isLimitTerm(pTerm) && *pbIn ){ + /* If there is an IN(...) 
term handled as an == (separate call to + ** xFilter for each value on the RHS of the IN) and a LIMIT or + ** OFFSET term handled as well, the plan is unusable. Set output + ** variable *pbRetryLimit to true to tell the caller to retry with + ** LIMIT and OFFSET disabled. */ + if( pIdxInfo->needToFreeIdxStr ){ + sqlite3_free(pIdxInfo->idxStr); + pIdxInfo->idxStr = 0; + pIdxInfo->needToFreeIdxStr = 0; + } + *pbRetryLimit = 1; + return SQLITE_OK; + } } } @@ -152624,11 +155746,19 @@ static int whereLoopAddVirtualOne( } /* -** If this function is invoked from within an xBestIndex() callback, it -** returns a pointer to a buffer containing the name of the collation -** sequence associated with element iCons of the sqlite3_index_info.aConstraint -** array. Or, if iCons is out of range or there is no active xBestIndex -** call, return NULL. +** Return the collating sequence for a constraint passed into xBestIndex. +** +** pIdxInfo must be an sqlite3_index_info structure passed into xBestIndex. +** This routine depends on there being a HiddenIndexInfo structure immediately +** following the sqlite3_index_info structure. +** +** Return a pointer to the collation name: +** +** 1. If there is an explicit COLLATE operator on the constaint, return it. +** +** 2. Else, if the column has an alternative collation, return that. +** +** 3. Otherwise, return "BINARY". */ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int iCons){ HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; @@ -152645,6 +155775,88 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int return zRet; } +/* +** Return true if constraint iCons is really an IN(...) constraint, or +** false otherwise. If iCons is an IN(...) constraint, set (if bHandle!=0) +** or clear (if bHandle==0) the flag to handle it using an iterator. +*/ +SQLITE_API int sqlite3_vtab_in(sqlite3_index_info *pIdxInfo, int iCons, int bHandle){ + HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; + u32 m = SMASKBIT32(iCons); + if( m & pHidden->mIn ){ + if( bHandle==0 ){ + pHidden->mHandleIn &= ~m; + }else if( bHandle>0 ){ + pHidden->mHandleIn |= m; + } + return 1; + } + return 0; +} + +/* +** This interface is callable from within the xBestIndex callback only. +** +** If possible, set (*ppVal) to point to an object containing the value +** on the right-hand-side of constraint iCons. +*/ +SQLITE_API int sqlite3_vtab_rhs_value( + sqlite3_index_info *pIdxInfo, /* Copy of first argument to xBestIndex */ + int iCons, /* Constraint for which RHS is wanted */ + sqlite3_value **ppVal /* Write value extracted here */ +){ + HiddenIndexInfo *pH = (HiddenIndexInfo*)&pIdxInfo[1]; + sqlite3_value *pVal = 0; + int rc = SQLITE_OK; + if( iCons<0 || iCons>=pIdxInfo->nConstraint ){ + rc = SQLITE_MISUSE; /* EV: R-30545-25046 */ + }else{ + if( pH->aRhs[iCons]==0 ){ + WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset]; + rc = sqlite3ValueFromExpr( + pH->pParse->db, pTerm->pExpr->pRight, ENC(pH->pParse->db), + SQLITE_AFF_BLOB, &pH->aRhs[iCons] + ); + testcase( rc!=SQLITE_OK ); + } + pVal = pH->aRhs[iCons]; + } + *ppVal = pVal; + + if( rc==SQLITE_OK && pVal==0 ){ /* IMP: R-19933-32160 */ + rc = SQLITE_NOTFOUND; /* IMP: R-36424-56542 */ + } + + return rc; +} + +/* +** Return true if ORDER BY clause may be handled as DISTINCT. 
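**
** A minimal sketch of how an xBestIndex() implementation might consult
** these interfaces (hypothetical virtual table; the function and column
** names are illustrative only, not taken from this file):
**
**      static int vtabObjBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *p){
**        int i;
**        sqlite3_value *pVal = 0;
**        for(i=0; i<p->nConstraint; i++){
**          if( !p->aConstraint[i].usable ) continue;
**          if( sqlite3_vtab_in(p, i, 1) ){
**            // constraint i is "col IN (...)"; request the whole RHS and
**            // walk it in xFilter with sqlite3_vtab_in_first()/_next()
**          }
**          if( sqlite3_vtab_rhs_value(p, i, &pVal)==SQLITE_OK ){
**            // pVal is the constant right-hand side, owned by SQLite core
**          }
**          // sqlite3_vtab_collation(p, i) names the collation to apply
**        }
**        if( sqlite3_vtab_distinct(p)==2 ){
**          // the ORDER BY is really a DISTINCT: duplicates may be
**          // suppressed and output order is irrelevant
**        }
**        return SQLITE_OK;
**      }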
+*/ +SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){ + HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; + assert( pHidden->eDistinct==0 + || pHidden->eDistinct==1 + || pHidden->eDistinct==2 ); + return pHidden->eDistinct; +} + +#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ + && !defined(SQLITE_OMIT_VIRTUALTABLE) +/* +** Cause the prepared statement that is associated with a call to +** xBestIndex to open write transactions on all attached schemas. +** This is used by the (built-in) sqlite_dbpage virtual table. +*/ +SQLITE_PRIVATE void sqlite3VtabWriteAll(sqlite3_index_info *pIdxInfo){ + HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; + Parse *pParse = pHidden->pParse; + int nDb = pParse->db->nDb; + int i; + for(i=0; ipNew->iTab. That table is guaranteed to be a virtual table. @@ -152686,6 +155898,7 @@ static int whereLoopAddVirtual( WhereLoop *pNew; Bitmask mBest; /* Tables used by best possible plan */ u16 mNoOmit; + int bRetry = 0; /* True to retry with LIMIT/OFFSET disabled */ assert( (mPrereq & mUnusable)==0 ); pWInfo = pBuilder->pWInfo; @@ -152694,8 +155907,7 @@ static int whereLoopAddVirtual( pNew = pBuilder->pNew; pSrc = &pWInfo->pTabList->a[pNew->iTab]; assert( IsVirtual(pSrc->pTab) ); - p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy, - &mNoOmit); + p = allocateIndexInfo(pWInfo, pWC, mUnusable, pSrc, &mNoOmit); if( p==0 ) return SQLITE_NOMEM_BKPT; pNew->rSetup = 0; pNew->wsFlags = WHERE_VIRTUALTABLE; @@ -152703,14 +155915,22 @@ static int whereLoopAddVirtual( pNew->u.vtab.needFree = 0; nConstraint = p->nConstraint; if( whereLoopResize(pParse->db, pNew, nConstraint) ){ - sqlite3DbFree(pParse->db, p); + freeIndexInfo(pParse->db, p); return SQLITE_NOMEM_BKPT; } /* First call xBestIndex() with all constraints usable. 
*/ WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName)); WHERETRACE(0x40, (" VirtualOne: all usable\n")); - rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn); + rc = whereLoopAddVirtualOne( + pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry + ); + if( bRetry ){ + assert( rc==SQLITE_OK ); + rc = whereLoopAddVirtualOne( + pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, 0 + ); + } /* If the call to xBestIndex() with all terms enabled produced a plan ** that does not require any source tables (IOW: a plan with mBest==0) @@ -152728,7 +155948,7 @@ static int whereLoopAddVirtual( if( bIn ){ WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n")); rc = whereLoopAddVirtualOne( - pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn); + pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn, 0); assert( bIn==0 ); mBestNoIn = pNew->prereq & ~mPrereq; if( mBestNoIn==0 ){ @@ -152755,7 +155975,7 @@ static int whereLoopAddVirtual( WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext)); rc = whereLoopAddVirtualOne( - pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn); + pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn, 0); if( pNew->prereq==mPrereq ){ seenZero = 1; if( bIn==0 ) seenZeroNoIN = 1; @@ -152768,7 +155988,7 @@ static int whereLoopAddVirtual( if( rc==SQLITE_OK && seenZero==0 ){ WHERETRACE(0x40, (" VirtualOne: all disabled\n")); rc = whereLoopAddVirtualOne( - pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn); + pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn, 0); if( bIn==0 ) seenZeroNoIN = 1; } @@ -152778,12 +155998,12 @@ static int whereLoopAddVirtual( if( rc==SQLITE_OK && seenZeroNoIN==0 ){ WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n")); rc = whereLoopAddVirtualOne( - pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn); + pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn, 0); } } if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr); - sqlite3DbFreeNN(pParse->db, p); + freeIndexInfo(pParse->db, p); WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pTab->zName, rc)); return rc; } @@ -152827,7 +156047,6 @@ static int whereLoopAddOr( int i, j; sSubBuild = *pBuilder; - sSubBuild.pOrderBy = 0; sSubBuild.pOrSet = &sCur; WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm)); @@ -152839,6 +156058,7 @@ static int whereLoopAddOr( tempWC.pOuter = pWC; tempWC.op = TK_AND; tempWC.nTerm = 1; + tempWC.nBase = 1; tempWC.a = pOrTerm; sSubBuild.pWC = &tempWC; }else{ @@ -153310,7 +156530,7 @@ static i8 wherePathSatisfiesOrderBy( if( obSat==obDone ) return (i8)nOrderBy; if( !isOrderDistinct ){ for(i=nOrderBy-1; i>0; i--){ - Bitmask m = MASKBIT(i) - 1; + Bitmask m = ALWAYS(ipWInfo; if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0; @@ -153833,7 +157054,8 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ pLoop = pBuilder->pNew; pLoop->wsFlags = 0; pLoop->nSkip = 0; - pTerm = sqlite3WhereFindTerm(pWC, iCur, -1, 0, WO_EQ|WO_IS, 0); + pTerm = whereScanInit(&scan, pWC, iCur, -1, WO_EQ|WO_IS, 0); + while( pTerm && pTerm->prereqRight ) pTerm = whereScanNext(&scan); if( pTerm ){ testcase( pTerm->eOperator & WO_IS ); pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW; @@ -153852,7 +157074,8 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ ) continue; opMask = pIdx->uniqNotNull ? 
(WO_EQ|WO_IS) : WO_EQ; for(j=0; jnKeyCol; j++){ - pTerm = sqlite3WhereFindTerm(pWC, iCur, j, 0, opMask, pIdx); + pTerm = whereScanInit(&scan, pWC, iCur, j, opMask, pIdx); + while( pTerm && pTerm->prereqRight ) pTerm = whereScanNext(&scan); if( pTerm==0 ) break; testcase( pTerm->eOperator & WO_IS ); pLoop->aLTerm[j] = pTerm; @@ -153881,8 +157104,14 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } + if( scan.iEquiv>1 ) pLoop->wsFlags |= WHERE_TRANSCONS; #ifdef SQLITE_DEBUG pLoop->cId = '0'; +#endif +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace ){ + sqlite3DebugPrintf("whereShortCut() used to compute solution\n"); + } #endif return 1; } @@ -153937,6 +157166,150 @@ static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){ # define WHERETRACE_ALL_LOOPS(W,C) #endif +/* Attempt to omit tables from a join that do not affect the result. +** For a table to not affect the result, the following must be true: +** +** 1) The query must not be an aggregate. +** 2) The table must be the RHS of a LEFT JOIN. +** 3) Either the query must be DISTINCT, or else the ON or USING clause +** must contain a constraint that limits the scan of the table to +** at most a single row. +** 4) The table must not be referenced by any part of the query apart +** from its own USING or ON clause. +** +** For example, given: +** +** CREATE TABLE t1(ipk INTEGER PRIMARY KEY, v1); +** CREATE TABLE t2(ipk INTEGER PRIMARY KEY, v2); +** CREATE TABLE t3(ipk INTEGER PRIMARY KEY, v3); +** +** then table t2 can be omitted from the following: +** +** SELECT v1, v3 FROM t1 +** LEFT JOIN t2 ON (t1.ipk=t2.ipk) +** LEFT JOIN t3 ON (t1.ipk=t3.ipk) +** +** or from: +** +** SELECT DISTINCT v1, v3 FROM t1 +** LEFT JOIN t2 +** LEFT JOIN t3 ON (t1.ipk=t3.ipk) +*/ +static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( + WhereInfo *pWInfo, + Bitmask notReady +){ + int i; + Bitmask tabUsed; + + /* Preconditions checked by the caller */ + assert( pWInfo->nLevel>=2 ); + assert( OptimizationEnabled(pWInfo->pParse->db, SQLITE_OmitNoopJoin) ); + + /* These two preconditions checked by the caller combine to guarantee + ** condition (1) of the header comment */ + assert( pWInfo->pResultSet!=0 ); + assert( 0==(pWInfo->wctrlFlags & WHERE_AGG_DISTINCT) ); + + tabUsed = sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pResultSet); + if( pWInfo->pOrderBy ){ + tabUsed |= sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pOrderBy); + } + for(i=pWInfo->nLevel-1; i>=1; i--){ + WhereTerm *pTerm, *pEnd; + SrcItem *pItem; + WhereLoop *pLoop; + pLoop = pWInfo->a[i].pWLoop; + pItem = &pWInfo->pTabList->a[pLoop->iTab]; + if( (pItem->fg.jointype & JT_LEFT)==0 ) continue; + if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)==0 + && (pLoop->wsFlags & WHERE_ONEROW)==0 + ){ + continue; + } + if( (tabUsed & pLoop->maskSelf)!=0 ) continue; + pEnd = pWInfo->sWC.a + pWInfo->sWC.nTerm; + for(pTerm=pWInfo->sWC.a; pTermprereqAll & pLoop->maskSelf)!=0 ){ + if( !ExprHasProperty(pTerm->pExpr, EP_FromJoin) + || pTerm->pExpr->w.iRightJoinTable!=pItem->iCursor + ){ + break; + } + } + } + if( pTerm drop loop %c not used\n", pLoop->cId)); + notReady &= ~pLoop->maskSelf; + for(pTerm=pWInfo->sWC.a; pTermprereqAll & pLoop->maskSelf)!=0 ){ + pTerm->wtFlags |= TERM_CODED; + } + } + if( i!=pWInfo->nLevel-1 ){ + int nByte = (pWInfo->nLevel-1-i) * sizeof(WhereLevel); + memmove(&pWInfo->a[i], &pWInfo->a[i+1], nByte); + } + pWInfo->nLevel--; + assert( pWInfo->nLevel>0 ); + } + return 
notReady; +} + +/* +** Check to see if there are any SEARCH loops that might benefit from +** using a Bloom filter. Consider a Bloom filter if: +** +** (1) The SEARCH happens more than N times where N is the number +** of rows in the table that is being considered for the Bloom +** filter. +** (2) Some searches are expected to find zero rows. (This is determined +** by the WHERE_SELFCULL flag on the term.) +** (3) Bloom-filter processing is not disabled. (Checked by the +** caller.) +** (4) The size of the table being searched is known by ANALYZE. +** +** This block of code merely checks to see if a Bloom filter would be +** appropriate, and if so sets the WHERE_BLOOMFILTER flag on the +** WhereLoop. The implementation of the Bloom filter comes further +** down where the code for each WhereLoop is generated. +*/ +static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( + const WhereInfo *pWInfo +){ + int i; + LogEst nSearch; + + assert( pWInfo->nLevel>=2 ); + assert( OptimizationEnabled(pWInfo->pParse->db, SQLITE_BloomFilter) ); + nSearch = pWInfo->a[0].pWLoop->nOut; + for(i=1; inLevel; i++){ + WhereLoop *pLoop = pWInfo->a[i].pWLoop; + const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ); + if( (pLoop->wsFlags & reqFlags)==reqFlags + /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */ + && ALWAYS((pLoop->wsFlags & (WHERE_IPK|WHERE_INDEXED))!=0) + ){ + SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; + Table *pTab = pItem->pTab; + pTab->tabFlags |= TF_StatsUsed; + if( nSearch > pTab->nRowLogEst + && (pTab->tabFlags & TF_HasStat1)!=0 + ){ + testcase( pItem->fg.jointype & JT_LEFT ); + pLoop->wsFlags |= WHERE_BLOOMFILTER; + pLoop->wsFlags &= ~WHERE_IDX_ONLY; + WHERETRACE(0xffff, ( + "-> use Bloom-filter on loop %c because there are ~%.1e " + "lookups into %s which has only ~%.1e rows\n", + pLoop->cId, (double)sqlite3LogEstToInt(nSearch), pTab->zName, + (double)sqlite3LogEstToInt(pTab->nRowLogEst))); + } + } + nSearch += pLoop->nOut; + } +} + /* ** Generate the beginning of the loop used for WHERE clause processing. ** The return value is a pointer to an opaque structure that contains @@ -154031,6 +157404,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( Expr *pWhere, /* The WHERE clause */ ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ ExprList *pResultSet, /* Query result set. Req'd for DISTINCT */ + Select *pLimit, /* Use this LIMIT/OFFSET clause, if any */ u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */ int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number ** If WHERE_USE_LIMIT, then the limit amount */ @@ -154065,13 +157439,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; - sWLB.pOrderBy = pOrderBy; - - /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via - ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) 
*/ - if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){ - wctrlFlags &= ~WHERE_WANT_DISTINCT; - } /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask @@ -154114,11 +157481,18 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pWInfo->wctrlFlags = wctrlFlags; pWInfo->iLimit = iAuxArg; pWInfo->savedNQueryLoop = pParse->nQueryLoop; +#ifndef SQLITE_OMIT_VIRTUALTABLE + pWInfo->pLimit = pLimit; +#endif memset(&pWInfo->nOBSat, 0, offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat)); memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel)); assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */ pMaskSet = &pWInfo->sMaskSet; + pMaskSet->n = 0; + pMaskSet->ix[0] = -99; /* Initialize ix[0] to a value that can never be + ** a valid cursor number, to avoid an initial + ** test for pMaskSet->n==0 in sqlite3WhereGetMask() */ sWLB.pWInfo = pWInfo; sWLB.pWC = &pWInfo->sWC; sWLB.pNew = (WhereLoop*)(((char*)pWInfo)+nByteWInfo); @@ -154131,7 +157505,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* Split the WHERE clause into separate subexpressions where each ** subexpression is separated by an AND operator. */ - initMaskSet(pMaskSet); sqlite3WhereClauseInit(&pWInfo->sWC, pWInfo); sqlite3WhereSplit(&pWInfo->sWC, pWhere, TK_AND); @@ -154139,7 +157512,9 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( */ if( nTabList==0 ){ if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr; - if( wctrlFlags & WHERE_WANT_DISTINCT ){ + if( (wctrlFlags & WHERE_WANT_DISTINCT)!=0 + && OptimizationEnabled(db, SQLITE_DistinctOpt) + ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW")); @@ -154177,6 +157552,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* Analyze all of the subexpressions. */ sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC); + sqlite3WhereAddLimit(&pWInfo->sWC, pLimit); if( db->mallocFailed ) goto whereBeginError; /* Special case: WHERE terms that do not refer to any tables in the join @@ -154190,7 +157566,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** FROM ... WHERE random()>0; -- eval random() once per row ** FROM ... WHERE (SELECT random())>0; -- eval random() once overall */ - for(ii=0; iinTerm; ii++){ + for(ii=0; iinBase; ii++){ WhereTerm *pT = &sWLB.pWC->a[ii]; if( pT->wtFlags & TERM_VIRTUAL ) continue; if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){ @@ -154200,7 +157576,12 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } if( wctrlFlags & WHERE_WANT_DISTINCT ){ - if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){ + if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){ + /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via + ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) */ + wctrlFlags &= ~WHERE_WANT_DISTINCT; + pWInfo->wctrlFlags &= ~WHERE_WANT_DISTINCT; + }else if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){ /* The DISTINCT marking is pointless. Ignore it. 
*/ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; }else if( pOrderBy==0 ){ @@ -154271,9 +157652,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ pWInfo->revMask = ALLBITS; } - if( pParse->nErr || db->mallocFailed ){ + if( pParse->nErr ){ goto whereBeginError; } + assert( db->mallocFailed==0 ); #ifdef WHERETRACE_ENABLED if( sqlite3WhereTrace ){ sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); @@ -154301,34 +157683,15 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } #endif - /* Attempt to omit tables from the join that do not affect the result. - ** For a table to not affect the result, the following must be true: - ** - ** 1) The query must not be an aggregate. - ** 2) The table must be the RHS of a LEFT JOIN. - ** 3) Either the query must be DISTINCT, or else the ON or USING clause - ** must contain a constraint that limits the scan of the table to - ** at most a single row. - ** 4) The table must not be referenced by any part of the query apart - ** from its own USING or ON clause. - ** - ** For example, given: - ** - ** CREATE TABLE t1(ipk INTEGER PRIMARY KEY, v1); - ** CREATE TABLE t2(ipk INTEGER PRIMARY KEY, v2); - ** CREATE TABLE t3(ipk INTEGER PRIMARY KEY, v3); + /* Attempt to omit tables from a join that do not affect the result. + ** See the comment on whereOmitNoopJoin() for further information. ** - ** then table t2 can be omitted from the following: - ** - ** SELECT v1, v3 FROM t1 - ** LEFT JOIN t2 ON (t1.ipk=t2.ipk) - ** LEFT JOIN t3 ON (t1.ipk=t3.ipk) - ** - ** or from: - ** - ** SELECT DISTINCT v1, v3 FROM t1 - ** LEFT JOIN t2 - ** LEFT JOIN t3 ON (t1.ipk=t3.ipk) + ** This query optimization is factored out into a separate "no-inline" + ** procedure to keep the sqlite3WhereBegin() procedure from becoming + ** too large. If sqlite3WhereBegin() becomes too large, that prevents + ** some C-compiler optimizers from in-lining the + ** sqlite3WhereCodeOneLoopStart() procedure, and it is important to + ** in-line sqlite3WhereCodeOneLoopStart() for performance reasons. 
*/ notReady = ~(Bitmask)0; if( pWInfo->nLevel>=2 @@ -154336,49 +157699,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( && 0==(wctrlFlags & WHERE_AGG_DISTINCT) /* condition (1) above */ && OptimizationEnabled(db, SQLITE_OmitNoopJoin) ){ - int i; - Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pResultSet); - if( sWLB.pOrderBy ){ - tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy); - } - for(i=pWInfo->nLevel-1; i>=1; i--){ - WhereTerm *pTerm, *pEnd; - SrcItem *pItem; - pLoop = pWInfo->a[i].pWLoop; - pItem = &pWInfo->pTabList->a[pLoop->iTab]; - if( (pItem->fg.jointype & JT_LEFT)==0 ) continue; - if( (wctrlFlags & WHERE_WANT_DISTINCT)==0 - && (pLoop->wsFlags & WHERE_ONEROW)==0 - ){ - continue; - } - if( (tabUsed & pLoop->maskSelf)!=0 ) continue; - pEnd = sWLB.pWC->a + sWLB.pWC->nTerm; - for(pTerm=sWLB.pWC->a; pTermprereqAll & pLoop->maskSelf)!=0 ){ - if( !ExprHasProperty(pTerm->pExpr, EP_FromJoin) - || pTerm->pExpr->iRightJoinTable!=pItem->iCursor - ){ - break; - } - } - } - if( pTerm drop loop %c not used\n", pLoop->cId)); - notReady &= ~pLoop->maskSelf; - for(pTerm=sWLB.pWC->a; pTermprereqAll & pLoop->maskSelf)!=0 ){ - pTerm->wtFlags |= TERM_CODED; - } - } - if( i!=pWInfo->nLevel-1 ){ - int nByte = (pWInfo->nLevel-1-i) * sizeof(WhereLevel); - memmove(&pWInfo->a[i], &pWInfo->a[i+1], nByte); - } - pWInfo->nLevel--; - nTabList--; - } + notReady = whereOmitNoopJoin(pWInfo, notReady); + nTabList = pWInfo->nLevel; + assert( nTabList>0 ); + } + + /* Check to see if there are any SEARCH loops that might benefit from + ** using a Bloom filter. + */ + if( pWInfo->nLevel>=2 + && OptimizationEnabled(db, SQLITE_BloomFilter) + ){ + whereCheckIfBloomFilterIsUseful(pWInfo); } + #if defined(WHERETRACE_ENABLED) if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ sqlite3DebugPrintf("---- WHERE clause at end of analysis:\n"); @@ -154439,7 +157773,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pTab = pTabItem->pTab; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pLoop = pLevel->pWLoop; - if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ + if( (pTab->tabFlags & TF_Ephemeral)!=0 || IsView(pTab) ){ /* Do nothing */ }else #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -154465,6 +157799,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pWInfo->eOnePass==ONEPASS_OFF && pTab->nColtabFlags & (TF_HasGenerated|TF_WithoutRowid))==0 + && (pLoop->wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))==0 ){ /* If we know that only a prefix of the record will be used, ** it is advantageous to reduce the "column count" field in @@ -154564,15 +157899,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( for(ii=0; iinErr ) goto whereBeginError; pLevel = &pWInfo->a[ii]; wsFlags = pLevel->pWLoop->wsFlags; + if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ + if( (wsFlags & WHERE_AUTO_INDEX)!=0 ){ #ifndef SQLITE_OMIT_AUTOMATIC_INDEX - if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){ - constructAutomaticIndex(pParse, &pWInfo->sWC, - &pTabList->a[pLevel->iFrom], notReady, pLevel); + constructAutomaticIndex(pParse, &pWInfo->sWC, + &pTabList->a[pLevel->iFrom], notReady, pLevel); +#endif + }else{ + sqlite3ConstructBloomFilter(pWInfo, ii, pLevel, notReady); + } if( db->mallocFailed ) goto whereBeginError; } -#endif addrExplain = sqlite3WhereExplainOneScan( pParse, pTabList, pLevel, wctrlFlags ); @@ -154620,6 +157960,26 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } #endif +#ifdef SQLITE_DEBUG +/* +** Return true if cursor iCur is opened by instruction k of the +** bytecode. 
Used inside of assert() only. +*/ +static int cursorIsOpen(Vdbe *v, int iCur, int k){ + while( k>=0 ){ + VdbeOp *pOp = sqlite3VdbeGetOp(v,k--); + if( pOp->p1!=iCur ) continue; + if( pOp->opcode==OP_Close ) return 0; + if( pOp->opcode==OP_OpenRead ) return 1; + if( pOp->opcode==OP_OpenWrite ) return 1; + if( pOp->opcode==OP_OpenDup ) return 1; + if( pOp->opcode==OP_OpenAutoindex ) return 1; + if( pOp->opcode==OP_OpenEphemeral ) return 1; + } + return 0; +} +#endif /* SQLITE_DEBUG */ + /* ** Generate the end of the WHERE loop. See comments on ** sqlite3WhereBegin() for additional information. @@ -154685,7 +158045,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ }else{ sqlite3VdbeResolveLabel(v, pLevel->addrCont); } - if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){ + if( (pLoop->wsFlags & WHERE_IN_ABLE)!=0 && pLevel->u.in.nIn>0 ){ struct InLoop *pIn; int j; sqlite3VdbeResolveLabel(v, pLevel->addrNxt); @@ -154754,8 +158114,14 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); } if( (ws & WHERE_INDEXED) - || ((ws & WHERE_MULTI_OR) && pLevel->u.pCovidx) + || ((ws & WHERE_MULTI_OR) && pLevel->u.pCoveringIdx) ){ + if( ws & WHERE_MULTI_OR ){ + Index *pIx = pLevel->u.pCoveringIdx; + int iDb = sqlite3SchemaToIndex(db, pIx->pSchema); + sqlite3VdbeAddOp3(v, OP_ReopenIdx, pLevel->iIdxCur, pIx->tnum, iDb); + sqlite3VdbeSetP4KeyInfo(pParse, pIx); + } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); } if( pLevel->op==OP_Return ){ @@ -154802,7 +158168,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** created for the ONEPASS optimization. */ if( (pTab->tabFlags & TF_Ephemeral)==0 - && pTab->pSelect==0 + && !IsView(pTab) && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){ int ws = pLoop->wsFlags; @@ -154832,7 +158198,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){ pIdx = pLoop->u.btree.pIndex; }else if( pLoop->wsFlags & WHERE_MULTI_OR ){ - pIdx = pLevel->u.pCovidx; + pIdx = pLevel->u.pCoveringIdx; } if( pIdx && !db->mallocFailed @@ -154866,6 +158232,11 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ){ int x = pOp->p2; assert( pIdx->pTable==pTab ); +#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC + if( pOp->opcode==OP_Offset ){ + /* Do not need to translate the column number */ + }else +#endif if( !HasRowid(pTab) ){ Index *pPk = sqlite3PrimaryKeyIndex(pTab); x = pPk->aiColumn[x]; @@ -154879,9 +158250,22 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pOp->p2 = x; pOp->p1 = pLevel->iIdxCur; OpcodeRewriteTrace(db, k, pOp); + }else{ + /* Unable to translate the table reference into an index + ** reference. Verify that this is harmless - that the + ** table being referenced really is open. 
+ */ +#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 + || cursorIsOpen(v,pOp->p1,k) + || pOp->opcode==OP_Offset + ); +#else + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 + || cursorIsOpen(v,pOp->p1,k) + ); +#endif } - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 - || pWInfo->eOnePass ); }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; pOp->opcode = OP_IdxRowid; @@ -155493,7 +158877,7 @@ static void noopValueFunc(sqlite3_context *p){ UNUSED_PARAMETER(p); /*no-op*/ } /* Window functions that use all window interfaces: xStep, xFinal, ** xValue, and xInverse */ #define WINDOWFUNCALL(name,nArg,extra) { \ - nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ + nArg, (SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ name ## StepFunc, name ## FinalizeFunc, name ## ValueFunc, \ name ## InvFunc, name ## Name, {0} \ } @@ -155501,7 +158885,7 @@ static void noopValueFunc(sqlite3_context *p){ UNUSED_PARAMETER(p); /*no-op*/ } /* Window functions that are implemented using bytecode and thus have ** no-op routines for their methods */ #define WINDOWFUNCNOOP(name,nArg,extra) { \ - nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ + nArg, (SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ noopStepFunc, noopValueFunc, noopValueFunc, \ noopStepFunc, name ## Name, {0} \ } @@ -155510,7 +158894,7 @@ static void noopValueFunc(sqlite3_context *p){ UNUSED_PARAMETER(p); /*no-op*/ } ** same routine for xFinalize and xValue and which never call ** xInverse. */ #define WINDOWFUNCX(name,nArg,extra) { \ - nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ + nArg, (SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \ name ## StepFunc, name ## ValueFunc, name ## ValueFunc, \ noopStepFunc, name ## Name, {0} \ } @@ -155820,9 +159204,7 @@ static ExprList *exprListAppendList( if( bIntToNull ){ int iDummy; Expr *pSub; - for(pSub=pDup; ExprHasProperty(pSub, EP_Skip); pSub=pSub->pLeft){ - assert( pSub ); - } + pSub = sqlite3ExprSkipCollateAndLikely(pDup); if( sqlite3ExprIsInteger(pSub, &iDummy) ){ pSub->op = TK_NULL; pSub->flags &= ~(EP_IntValue|EP_IsTrue|EP_IsFalse); @@ -155855,7 +159237,8 @@ static int sqlite3WindowExtraAggFuncDepth(Walker *pWalker, Expr *pExpr){ static int disallowAggregatesInOrderByCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_AGG_FUNCTION && pExpr->pAggInfo==0 ){ - sqlite3ErrorMsg(pWalker->pParse, + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + sqlite3ErrorMsg(pWalker->pParse, "misuse of aggregate: %s()", pExpr->u.zToken); } return WRC_Continue; @@ -155870,7 +159253,11 @@ static int disallowAggregatesInOrderByCb(Walker *pWalker, Expr *pExpr){ */ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ int rc = SQLITE_OK; - if( p->pWin && p->pPrior==0 && ALWAYS((p->selFlags & SF_WinRewrite)==0) ){ + if( p->pWin + && p->pPrior==0 + && ALWAYS((p->selFlags & SF_WinRewrite)==0) + && ALWAYS(!IN_RENAME_OBJECT) + ){ Vdbe *v = sqlite3GetVdbe(pParse); sqlite3 *db = pParse->db; Select *pSub = 0; /* The subquery */ @@ -155943,7 +159330,10 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ ** window function - one for the accumulator, another for interim ** results. 
*/ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){ - ExprList *pArgs = pWin->pOwner->x.pList; + ExprList *pArgs; + assert( ExprUseXList(pWin->pOwner) ); + assert( pWin->pFunc!=0 ); + pArgs = pWin->pOwner->x.pList; if( pWin->pFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist); pWin->iArgCol = (pSublist ? pSublist->nExpr : 0); @@ -155980,11 +159370,14 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ ("New window-function subquery in FROM clause of (%u/%p)\n", p->selId, p)); p->pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); + assert( pSub!=0 || p->pSrc==0 ); /* Due to db->mallocFailed test inside + ** of sqlite3DbMallocRawNN() called from + ** sqlite3SrcListAppend() */ if( p->pSrc ){ Table *pTab2; p->pSrc->a[0].pSelect = pSub; sqlite3SrcListAssignCursors(pParse, p->pSrc); - pSub->selFlags |= SF_Expanded; + pSub->selFlags |= SF_Expanded|SF_OrderByReqd; pTab2 = sqlite3ResultSetOfSelect(pParse, pSub, SQLITE_AFF_NONE); pSub->selFlags |= (selFlags & SF_Aggregate); if( pTab2==0 ){ @@ -156007,15 +159400,14 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ sqlite3SelectDelete(db, pSub); } if( db->mallocFailed ) rc = SQLITE_NOMEM; - sqlite3DbFree(db, pTab); - } - if( rc ){ - if( pParse->nErr==0 ){ - assert( pParse->db->mallocFailed ); - sqlite3ErrorToParser(pParse->db, SQLITE_NOMEM); - } + /* Defer deleting the temporary table pTab because if an error occurred, + ** there could still be references to that table embedded in the + ** result-set or ORDER BY clause of the SELECT statement p. */ + sqlite3ParserAddCleanup(pParse, sqlite3DbFree, pTab); } + + assert( rc==SQLITE_OK || pParse->nErr!=0 ); return rc; } @@ -156256,7 +159648,12 @@ SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin){ ** different, or 2 if it cannot be determined if the objects are identical ** or not. Identical window objects can be processed in a single scan. */ -SQLITE_PRIVATE int sqlite3WindowCompare(Parse *pParse, Window *p1, Window *p2, int bFilter){ +SQLITE_PRIVATE int sqlite3WindowCompare( + const Parse *pParse, + const Window *p1, + const Window *p2, + int bFilter +){ int res; if( NEVER(p1==0) || NEVER(p2==0) ) return 1; if( p1->eFrmType!=p2->eFrmType ) return 1; @@ -156328,8 +159725,11 @@ SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){ ** regApp+1: integer value used to ensure keys are unique ** regApp+2: output of MakeRecord */ - ExprList *pList = pWin->pOwner->x.pList; - KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pList, 0, 0); + ExprList *pList; + KeyInfo *pKeyInfo; + assert( ExprUseXList(pWin->pOwner) ); + pList = pWin->pOwner->x.pList; + pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pList, 0, 0); pWin->csrApp = pParse->nTab++; pWin->regApp = pParse->nMem+1; pParse->nMem += 3; @@ -156417,7 +159817,9 @@ static void windowCheckValue(Parse *pParse, int reg, int eCond){ ** with the object passed as the only argument to this function. */ static int windowArgCount(Window *pWin){ - ExprList *pList = pWin->pOwner->x.pList; + const ExprList *pList; + assert( ExprUseXList(pWin->pOwner) ); + pList = pWin->pOwner->x.pList; return (pList ? 
pList->nExpr : 0); } @@ -156602,6 +160004,7 @@ static void windowAggStep( int addrIf = 0; if( pWin->pFilter ){ int regTmp; + assert( ExprUseXList(pWin->pOwner) ); assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr ); assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 ); regTmp = sqlite3GetTempReg(pParse); @@ -156615,13 +160018,14 @@ static void windowAggStep( int iOp = sqlite3VdbeCurrentAddr(v); int iEnd; + assert( ExprUseXList(pWin->pOwner) ); nArg = pWin->pOwner->x.pList->nExpr; regArg = sqlite3GetTempRange(pParse, nArg); sqlite3ExprCodeExprList(pParse, pWin->pOwner->x.pList, regArg, 0, 0); for(iEnd=sqlite3VdbeCurrentAddr(v); iOp<iEnd; iOp++){ VdbeOp *pOp = sqlite3VdbeGetOp(v, iOp); - if( pOp->opcode==OP_Column && pOp->p1==pWin->iEphCsr ){ + if( pOp->opcode==OP_Column && pOp->p1==pMWin->iEphCsr ){ pOp->p1 = csr; } } @@ -156629,6 +160033,7 @@ static void windowAggStep( if( pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ CollSeq *pColl; assert( nArg>0 ); + assert( ExprUseXList(pWin->pOwner) ); pColl = sqlite3ExprNNCollSeq(pParse, pWin->pOwner->x.pList->a[0].pExpr); sqlite3VdbeAddOp4(v, OP_CollSeq, 0,0,0, (const char*)pColl, P4_COLLSEQ); } @@ -156814,6 +160219,7 @@ static void windowReturnOneRow(WindowCodeArg *p){ for(pWin=pMWin; pWin; pWin=pWin->pNextWin){ FuncDef *pFunc = pWin->pFunc; + assert( ExprUseXList(pWin->pOwner) ); if( pFunc->zName==nth_valueName || pFunc->zName==first_valueName ){ @@ -158149,10 +161555,7 @@ static void updateDeleteLimitError( } - /* Construct a new Expr object from a single identifier. Use the - ** new Expr to populate pOut. Set the span of pOut to be the identifier - ** that created the expression. - */ + /* Construct a new Expr object from a single token */ static Expr *tokenExpr(Parse *pParse, int op, Token t){ Expr *p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)+t.n+1); if( p ){ @@ -158163,15 +161566,16 @@ static void updateDeleteLimitError( ExprClearVVAProperties(p); p->iAgg = -1; p->pLeft = p->pRight = 0; - p->x.pList = 0; p->pAggInfo = 0; - p->y.pTab = 0; + memset(&p->x, 0, sizeof(p->x)); + memset(&p->y, 0, sizeof(p->y)); p->op2 = 0; p->iTable = 0; p->iColumn = 0; p->u.zToken = (char*)&p[1]; memcpy(p->u.zToken, t.z, t.n); p->u.zToken[t.n] = 0; + p->w.iOfst = (int)(t.z - pParse->zTail); if( sqlite3Isquote(p->u.zToken[0]) ){ sqlite3DequoteExpr(p); } @@ -158251,8 +161655,8 @@ static void updateDeleteLimitError( #define TK_LP 22 #define TK_RP 23 #define TK_AS 24 -#define TK_WITHOUT 25 -#define TK_COMMA 26 +#define TK_COMMA 25 +#define TK_WITHOUT 26 #define TK_ABORT 27 #define TK_ACTION 28 #define TK_AFTER 29 @@ -158338,78 +161742,79 @@ static void updateDeleteLimitError( #define TK_SLASH 109 #define TK_REM 110 #define TK_CONCAT 111 -#define TK_COLLATE 112 -#define TK_BITNOT 113 -#define TK_ON 114 -#define TK_INDEXED 115 -#define TK_STRING 116 -#define TK_JOIN_KW 117 -#define TK_CONSTRAINT 118 -#define TK_DEFAULT 119 -#define TK_NULL 120 -#define TK_PRIMARY 121 -#define TK_UNIQUE 122 -#define TK_CHECK 123 -#define TK_REFERENCES 124 -#define TK_AUTOINCR 125 -#define TK_INSERT 126 -#define TK_DELETE 127 -#define TK_UPDATE 128 -#define TK_SET 129 -#define TK_DEFERRABLE 130 -#define TK_FOREIGN 131 -#define TK_DROP 132 -#define TK_UNION 133 -#define TK_ALL 134 -#define TK_EXCEPT 135 -#define TK_INTERSECT 136 -#define TK_SELECT 137 -#define TK_VALUES 138 -#define TK_DISTINCT 139 -#define TK_DOT 140 -#define TK_FROM 141 -#define TK_JOIN 142 -#define TK_USING 143 -#define TK_ORDER 144 -#define TK_GROUP 145 -#define TK_HAVING 146 -#define TK_LIMIT 147 -#define TK_WHERE 148 -#define TK_RETURNING 149 -#define 
TK_INTO 150 -#define TK_NOTHING 151 -#define TK_FLOAT 152 -#define TK_BLOB 153 -#define TK_INTEGER 154 -#define TK_VARIABLE 155 -#define TK_CASE 156 -#define TK_WHEN 157 -#define TK_THEN 158 -#define TK_ELSE 159 -#define TK_INDEX 160 -#define TK_ALTER 161 -#define TK_ADD 162 -#define TK_WINDOW 163 -#define TK_OVER 164 -#define TK_FILTER 165 -#define TK_COLUMN 166 -#define TK_AGG_FUNCTION 167 -#define TK_AGG_COLUMN 168 -#define TK_TRUEFALSE 169 -#define TK_ISNOT 170 -#define TK_FUNCTION 171 -#define TK_UMINUS 172 -#define TK_UPLUS 173 -#define TK_TRUTH 174 -#define TK_REGISTER 175 -#define TK_VECTOR 176 -#define TK_SELECT_COLUMN 177 -#define TK_IF_NULL_ROW 178 -#define TK_ASTERISK 179 -#define TK_SPAN 180 -#define TK_ERROR 181 -#define TK_SPACE 182 -#define TK_ILLEGAL 183 +#define TK_PTR 112 +#define TK_COLLATE 113 +#define TK_BITNOT 114 +#define TK_ON 115 +#define TK_INDEXED 116 +#define TK_STRING 117 +#define TK_JOIN_KW 118 +#define TK_CONSTRAINT 119 +#define TK_DEFAULT 120 +#define TK_NULL 121 +#define TK_PRIMARY 122 +#define TK_UNIQUE 123 +#define TK_CHECK 124 +#define TK_REFERENCES 125 +#define TK_AUTOINCR 126 +#define TK_INSERT 127 +#define TK_DELETE 128 +#define TK_UPDATE 129 +#define TK_SET 130 +#define TK_DEFERRABLE 131 +#define TK_FOREIGN 132 +#define TK_DROP 133 +#define TK_UNION 134 +#define TK_ALL 135 +#define TK_EXCEPT 136 +#define TK_INTERSECT 137 +#define TK_SELECT 138 +#define TK_VALUES 139 +#define TK_DISTINCT 140 +#define TK_DOT 141 +#define TK_FROM 142 +#define TK_JOIN 143 +#define TK_USING 144 +#define TK_ORDER 145 +#define TK_GROUP 146 +#define TK_HAVING 147 +#define TK_LIMIT 148 +#define TK_WHERE 149 +#define TK_RETURNING 150 +#define TK_INTO 151 +#define TK_NOTHING 152 +#define TK_FLOAT 153 +#define TK_BLOB 154 +#define TK_INTEGER 155 +#define TK_VARIABLE 156 +#define TK_CASE 157 +#define TK_WHEN 158 +#define TK_THEN 159 +#define TK_ELSE 160 +#define TK_INDEX 161 +#define TK_ALTER 162 +#define TK_ADD 163 +#define TK_WINDOW 164 +#define TK_OVER 165 +#define TK_FILTER 166 +#define TK_COLUMN 167 +#define TK_AGG_FUNCTION 168 +#define TK_AGG_COLUMN 169 +#define TK_TRUEFALSE 170 +#define TK_ISNOT 171 +#define TK_FUNCTION 172 +#define TK_UMINUS 173 +#define TK_UPLUS 174 +#define TK_TRUTH 175 +#define TK_REGISTER 176 +#define TK_VECTOR 177 +#define TK_SELECT_COLUMN 178 +#define TK_IF_NULL_ROW 179 +#define TK_ASTERISK 180 +#define TK_SPAN 181 +#define TK_ERROR 182 +#define TK_SPACE 183 +#define TK_ILLEGAL 184 #endif /**************** End token definitions ***************************************/ @@ -158469,29 +161874,30 @@ static void updateDeleteLimitError( #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 317 +#define YYNOCODE 319 #define YYACTIONTYPE unsigned short int #define YYWILDCARD 101 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - Window* yy49; - ExprList* yy70; - Select* yy81; - With* yy103; - struct FrameBound yy117; - struct {int value; int mask;} yy139; - SrcList* yy153; - TriggerStep* yy157; - Upsert* yy190; - struct TrigEvent yy262; - Cte* yy329; - int yy376; - Expr* yy404; - IdList* yy436; - const char* yy504; - u8 yy552; + TriggerStep* yy33; + Window* yy41; + Select* yy47; + SrcList* yy131; + struct TrigEvent yy180; + struct {int value; int mask;} yy231; + IdList* yy254; + u32 yy285; + ExprList* yy322; + Cte* yy385; + int yy394; + Upsert* yy444; + u8 yy516; + With* yy521; + const char* yy522; + Expr* yy528; + struct 
FrameBound yy595; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -158507,18 +161913,18 @@ typedef union { #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 570 -#define YYNRULE 398 -#define YYNRULE_WITH_ACTION 337 -#define YYNTOKEN 184 -#define YY_MAX_SHIFT 569 -#define YY_MIN_SHIFTREDUCE 825 -#define YY_MAX_SHIFTREDUCE 1222 -#define YY_ERROR_ACTION 1223 -#define YY_ACCEPT_ACTION 1224 -#define YY_NO_ACTION 1225 -#define YY_MIN_REDUCE 1226 -#define YY_MAX_REDUCE 1623 +#define YYNSTATE 574 +#define YYNRULE 402 +#define YYNRULE_WITH_ACTION 340 +#define YYNTOKEN 185 +#define YY_MAX_SHIFT 573 +#define YY_MIN_SHIFTREDUCE 831 +#define YY_MAX_SHIFTREDUCE 1232 +#define YY_ERROR_ACTION 1233 +#define YY_ACCEPT_ACTION 1234 +#define YY_NO_ACTION 1235 +#define YY_MIN_REDUCE 1236 +#define YY_MAX_REDUCE 1637 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -158585,601 +161991,612 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2023) +#define YY_ACTTAB_COUNT (2070) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 563, 1295, 563, 1274, 168, 1257, 115, 112, 218, 373, - /* 10 */ 563, 1295, 374, 563, 488, 563, 115, 112, 218, 406, - /* 20 */ 1300, 1300, 41, 41, 41, 41, 514, 1504, 520, 1298, - /* 30 */ 1298, 959, 41, 41, 1260, 71, 71, 51, 51, 960, - /* 40 */ 557, 557, 557, 122, 123, 113, 1200, 1200, 1035, 1038, - /* 50 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 414, 406, - /* 60 */ 273, 273, 273, 273, 115, 112, 218, 115, 112, 218, - /* 70 */ 197, 268, 545, 560, 515, 560, 211, 563, 385, 248, - /* 80 */ 215, 521, 399, 122, 123, 113, 1200, 1200, 1035, 1038, - /* 90 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 540, 13, - /* 100 */ 13, 1259, 119, 119, 119, 119, 118, 118, 117, 117, - /* 110 */ 117, 116, 441, 1176, 419, 197, 446, 320, 512, 1539, - /* 120 */ 1545, 372, 1547, 6, 371, 1176, 1148, 394, 1148, 406, - /* 130 */ 1545, 534, 115, 112, 218, 1415, 99, 30, 121, 121, - /* 140 */ 121, 121, 119, 119, 119, 119, 118, 118, 117, 117, - /* 150 */ 117, 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, - /* 160 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 31, 1176, - /* 170 */ 1177, 1178, 241, 357, 1558, 501, 498, 497, 317, 124, - /* 180 */ 319, 1176, 1177, 1178, 1176, 496, 119, 119, 119, 119, - /* 190 */ 118, 118, 117, 117, 117, 116, 441, 139, 96, 406, - /* 200 */ 121, 121, 121, 121, 114, 117, 117, 117, 116, 441, - /* 210 */ 541, 1532, 119, 119, 119, 119, 118, 118, 117, 117, - /* 220 */ 117, 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, - /* 230 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 406, 441, - /* 240 */ 1176, 1177, 1178, 81, 439, 439, 439, 80, 119, 119, - /* 250 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 488, - /* 260 */ 1176, 318, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, - /* 270 */ 1028, 120, 120, 121, 121, 121, 121, 493, 1025, 1025, - /* 280 */ 1036, 1039, 119, 119, 119, 119, 118, 118, 117, 117, - /* 290 */ 117, 116, 441, 1584, 995, 1224, 1, 1, 569, 2, - /* 300 */ 1228, 1267, 137, 1503, 245, 305, 473, 140, 406, 860, - /* 310 */ 561, 1176, 914, 914, 1308, 359, 1176, 1177, 1178, 462, - /* 320 */ 330, 119, 119, 119, 119, 118, 118, 117, 117, 117, - /* 330 */ 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, - /* 340 */ 1028, 120, 120, 
121, 121, 121, 121, 328, 273, 273, - /* 350 */ 1015, 83, 1029, 425, 1564, 569, 2, 1228, 304, 554, - /* 360 */ 925, 560, 305, 944, 140, 860, 1006, 1176, 1177, 1178, - /* 370 */ 1005, 1308, 411, 213, 511, 229, 119, 119, 119, 119, - /* 380 */ 118, 118, 117, 117, 117, 116, 441, 519, 347, 116, - /* 390 */ 441, 119, 119, 119, 119, 118, 118, 117, 117, 117, - /* 400 */ 116, 441, 1005, 1005, 1007, 273, 273, 445, 563, 16, - /* 410 */ 16, 1590, 563, 1540, 563, 406, 1176, 6, 560, 344, - /* 420 */ 182, 118, 118, 117, 117, 117, 116, 441, 416, 142, - /* 430 */ 71, 71, 229, 563, 71, 71, 55, 55, 203, 122, - /* 440 */ 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, - /* 450 */ 121, 121, 121, 121, 217, 13, 13, 1176, 406, 568, - /* 460 */ 1400, 1228, 502, 137, 445, 168, 305, 545, 140, 1180, - /* 470 */ 424, 545, 1176, 1177, 1178, 1308, 544, 438, 437, 944, - /* 480 */ 513, 452, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, - /* 490 */ 1028, 120, 120, 121, 121, 121, 121, 315, 119, 119, - /* 500 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 273, - /* 510 */ 273, 1143, 416, 1176, 1177, 1178, 543, 563, 1143, 304, - /* 520 */ 554, 1561, 560, 1207, 1143, 1207, 1180, 1143, 406, 530, - /* 530 */ 421, 1143, 864, 183, 1143, 143, 229, 562, 32, 71, - /* 540 */ 71, 119, 119, 119, 119, 118, 118, 117, 117, 117, - /* 550 */ 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, - /* 560 */ 1028, 120, 120, 121, 121, 121, 121, 406, 445, 241, - /* 570 */ 1176, 857, 501, 498, 497, 1176, 526, 189, 245, 538, - /* 580 */ 1539, 282, 496, 370, 6, 563, 529, 477, 5, 279, - /* 590 */ 1015, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, - /* 600 */ 120, 120, 121, 121, 121, 121, 1006, 13, 13, 1414, - /* 610 */ 1005, 119, 119, 119, 119, 118, 118, 117, 117, 117, - /* 620 */ 116, 441, 426, 273, 273, 1176, 1176, 1177, 1178, 1619, - /* 630 */ 392, 1176, 1177, 1178, 1176, 342, 560, 406, 525, 361, - /* 640 */ 430, 1161, 1005, 1005, 1007, 348, 411, 357, 1558, 488, - /* 650 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, - /* 660 */ 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, - /* 670 */ 120, 120, 121, 121, 121, 121, 406, 830, 831, 832, - /* 680 */ 1016, 1176, 1177, 1178, 396, 285, 148, 1312, 304, 554, - /* 690 */ 1176, 1177, 1178, 1467, 216, 3, 337, 137, 340, 560, - /* 700 */ 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, - /* 710 */ 120, 121, 121, 121, 121, 563, 504, 946, 273, 273, - /* 720 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, - /* 730 */ 441, 560, 1176, 427, 563, 451, 98, 13, 13, 259, - /* 740 */ 276, 356, 507, 351, 506, 246, 406, 361, 469, 1530, - /* 750 */ 1000, 347, 293, 304, 554, 1589, 71, 71, 889, 119, - /* 760 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 441, - /* 770 */ 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, - /* 780 */ 120, 121, 121, 121, 121, 406, 1143, 1078, 1176, 1177, - /* 790 */ 1178, 416, 1080, 300, 150, 995, 1080, 361, 361, 1143, - /* 800 */ 361, 378, 1143, 477, 563, 244, 243, 242, 1278, 122, - /* 810 */ 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, - /* 820 */ 121, 121, 121, 121, 563, 880, 13, 13, 483, 119, - /* 830 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 441, - /* 840 */ 1176, 191, 540, 563, 147, 149, 13, 13, 328, 457, - /* 850 */ 316, 1083, 1083, 485, 1537, 406, 505, 1530, 6, 1514, - /* 860 */ 284, 192, 1277, 145, 881, 71, 71, 488, 119, 119, - /* 870 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 122, - /* 880 */ 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, - /* 890 */ 121, 121, 121, 121, 563, 471, 1176, 1177, 1178, 406, - 
/* 900 */ 852, 327, 301, 462, 330, 1516, 270, 1530, 1530, 944, - /* 910 */ 1531, 1307, 313, 9, 842, 251, 71, 71, 477, 428, - /* 920 */ 146, 488, 38, 945, 101, 113, 1200, 1200, 1035, 1038, - /* 930 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 119, 119, - /* 940 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 563, - /* 950 */ 1197, 1099, 563, 436, 563, 1533, 563, 852, 1122, 1617, - /* 960 */ 454, 290, 1617, 546, 251, 1303, 1100, 267, 267, 281, - /* 970 */ 404, 70, 70, 460, 71, 71, 71, 71, 13, 13, - /* 980 */ 560, 1101, 119, 119, 119, 119, 118, 118, 117, 117, - /* 990 */ 117, 116, 441, 542, 104, 273, 273, 273, 273, 1197, - /* 1000 */ 217, 1468, 900, 471, 450, 563, 1473, 1197, 560, 447, - /* 1010 */ 560, 545, 901, 440, 406, 1058, 292, 274, 274, 198, - /* 1020 */ 547, 450, 449, 1473, 1475, 944, 455, 56, 56, 410, - /* 1030 */ 560, 1122, 1618, 379, 406, 1618, 404, 1120, 122, 123, - /* 1040 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, - /* 1050 */ 121, 121, 121, 1460, 406, 12, 1197, 1512, 122, 123, - /* 1060 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, - /* 1070 */ 121, 121, 121, 308, 471, 126, 359, 286, 122, 111, - /* 1080 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, - /* 1090 */ 121, 121, 121, 309, 450, 471, 1473, 119, 119, 119, - /* 1100 */ 119, 118, 118, 117, 117, 117, 116, 441, 1176, 563, - /* 1110 */ 1120, 482, 563, 312, 433, 479, 197, 119, 119, 119, - /* 1120 */ 119, 118, 118, 117, 117, 117, 116, 441, 405, 12, - /* 1130 */ 536, 15, 15, 478, 43, 43, 509, 119, 119, 119, - /* 1140 */ 119, 118, 118, 117, 117, 117, 116, 441, 289, 535, - /* 1150 */ 294, 563, 294, 391, 1220, 438, 437, 406, 1154, 403, - /* 1160 */ 402, 1400, 920, 1204, 1176, 1177, 1178, 919, 1206, 291, - /* 1170 */ 1306, 1249, 412, 57, 57, 488, 1205, 563, 556, 412, - /* 1180 */ 1176, 1344, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, - /* 1190 */ 120, 120, 121, 121, 121, 121, 1400, 1143, 563, 44, - /* 1200 */ 44, 1207, 194, 1207, 273, 273, 1400, 461, 537, 1154, - /* 1210 */ 1143, 108, 555, 1143, 4, 391, 1121, 560, 1538, 335, - /* 1220 */ 58, 58, 6, 1246, 1099, 380, 1400, 376, 558, 1536, - /* 1230 */ 563, 422, 1221, 6, 304, 554, 1176, 1177, 1178, 1100, - /* 1240 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, - /* 1250 */ 441, 442, 59, 59, 1101, 516, 1535, 273, 273, 563, - /* 1260 */ 6, 563, 110, 552, 563, 528, 423, 413, 169, 548, - /* 1270 */ 560, 108, 555, 137, 4, 551, 484, 272, 215, 222, - /* 1280 */ 211, 60, 60, 61, 61, 98, 62, 62, 558, 273, - /* 1290 */ 273, 563, 1015, 467, 1221, 563, 434, 563, 106, 106, - /* 1300 */ 8, 920, 560, 273, 273, 107, 919, 442, 565, 564, - /* 1310 */ 563, 442, 1005, 45, 45, 464, 560, 46, 46, 47, - /* 1320 */ 47, 84, 202, 552, 1215, 404, 468, 563, 205, 304, - /* 1330 */ 554, 563, 49, 49, 563, 522, 404, 532, 563, 867, - /* 1340 */ 563, 105, 531, 103, 1005, 1005, 1007, 1008, 27, 50, - /* 1350 */ 50, 563, 1015, 63, 63, 475, 64, 64, 106, 106, - /* 1360 */ 65, 65, 14, 14, 17, 107, 563, 442, 565, 564, - /* 1370 */ 563, 303, 1005, 66, 66, 563, 226, 563, 959, 563, - /* 1380 */ 543, 404, 1196, 1343, 871, 278, 960, 456, 128, 128, - /* 1390 */ 563, 1065, 67, 67, 563, 206, 867, 52, 52, 68, - /* 1400 */ 68, 69, 69, 417, 1005, 1005, 1007, 1008, 27, 1563, - /* 1410 */ 1165, 444, 53, 53, 277, 1519, 156, 156, 307, 389, - /* 1420 */ 389, 388, 262, 386, 1165, 444, 839, 321, 277, 108, - /* 1430 */ 555, 523, 4, 389, 389, 388, 262, 386, 563, 223, - /* 1440 */ 839, 311, 326, 1492, 1117, 98, 558, 393, 1065, 310, - /* 1450 */ 563, 476, 563, 223, 563, 311, 879, 878, 1009, 277, - 
/* 1460 */ 157, 157, 463, 310, 389, 389, 388, 262, 386, 442, - /* 1470 */ 518, 839, 76, 76, 54, 54, 72, 72, 355, 225, - /* 1480 */ 563, 552, 275, 563, 223, 325, 311, 161, 354, 465, - /* 1490 */ 135, 563, 228, 225, 310, 532, 563, 206, 886, 887, - /* 1500 */ 533, 161, 129, 129, 135, 73, 73, 224, 962, 963, - /* 1510 */ 1015, 563, 287, 130, 130, 1009, 106, 106, 131, 131, - /* 1520 */ 563, 224, 563, 107, 225, 442, 565, 564, 997, 1276, - /* 1530 */ 1005, 250, 161, 127, 127, 135, 108, 555, 1077, 4, - /* 1540 */ 1077, 407, 155, 155, 154, 154, 304, 554, 1126, 563, - /* 1550 */ 1331, 563, 224, 558, 470, 407, 563, 250, 563, 1491, - /* 1560 */ 304, 554, 1005, 1005, 1007, 1008, 27, 563, 480, 332, - /* 1570 */ 448, 136, 136, 134, 134, 1340, 442, 336, 132, 132, - /* 1580 */ 133, 133, 563, 1076, 448, 1076, 407, 563, 552, 75, - /* 1590 */ 75, 304, 554, 339, 341, 343, 108, 555, 563, 4, - /* 1600 */ 1577, 299, 532, 563, 77, 77, 1291, 531, 472, 74, - /* 1610 */ 74, 250, 1275, 558, 350, 448, 331, 1015, 360, 98, - /* 1620 */ 42, 42, 1352, 106, 106, 48, 48, 1399, 494, 1327, - /* 1630 */ 107, 247, 442, 565, 564, 345, 442, 1005, 98, 1061, - /* 1640 */ 953, 917, 247, 250, 110, 1552, 550, 850, 552, 918, - /* 1650 */ 144, 1338, 110, 549, 1405, 1256, 1248, 1237, 1236, 1238, - /* 1660 */ 1571, 1324, 208, 390, 489, 265, 363, 200, 365, 1005, - /* 1670 */ 1005, 1007, 1008, 27, 11, 280, 221, 1015, 323, 474, - /* 1680 */ 1274, 367, 212, 106, 106, 924, 1386, 324, 288, 1381, - /* 1690 */ 107, 453, 442, 565, 564, 283, 329, 1005, 1391, 499, - /* 1700 */ 353, 1374, 1464, 108, 555, 1463, 4, 1574, 1390, 397, - /* 1710 */ 1215, 171, 254, 369, 383, 207, 195, 196, 1511, 553, - /* 1720 */ 558, 1509, 415, 1212, 100, 555, 83, 4, 204, 1005, - /* 1730 */ 1005, 1007, 1008, 27, 180, 166, 173, 219, 79, 82, - /* 1740 */ 458, 558, 175, 442, 35, 1387, 176, 459, 177, 178, - /* 1750 */ 492, 231, 96, 1469, 395, 552, 1393, 1392, 36, 466, - /* 1760 */ 1395, 184, 398, 481, 442, 1458, 235, 89, 1480, 487, - /* 1770 */ 266, 334, 237, 188, 490, 400, 552, 338, 238, 508, - /* 1780 */ 1239, 239, 1294, 1293, 1015, 1292, 1285, 429, 91, 871, - /* 1790 */ 106, 106, 1588, 213, 401, 1587, 431, 107, 1264, 442, - /* 1800 */ 565, 564, 1263, 352, 1005, 1015, 1262, 1586, 1557, 517, - /* 1810 */ 432, 106, 106, 1284, 297, 298, 358, 524, 107, 1335, - /* 1820 */ 442, 565, 564, 95, 1336, 1005, 252, 253, 435, 125, - /* 1830 */ 543, 1543, 10, 1444, 377, 1542, 1005, 1005, 1007, 1008, - /* 1840 */ 27, 97, 527, 375, 362, 102, 260, 364, 381, 1317, - /* 1850 */ 382, 1334, 366, 1245, 1333, 1316, 368, 1005, 1005, 1007, - /* 1860 */ 1008, 27, 1359, 1358, 34, 199, 1171, 566, 261, 263, - /* 1870 */ 264, 567, 1234, 158, 1229, 141, 295, 159, 1496, 302, - /* 1880 */ 1497, 1495, 1494, 160, 826, 209, 443, 201, 306, 210, - /* 1890 */ 78, 220, 1075, 138, 1073, 314, 162, 172, 1196, 227, - /* 1900 */ 174, 903, 322, 230, 1089, 179, 163, 164, 418, 408, - /* 1910 */ 409, 170, 181, 85, 86, 420, 87, 165, 1092, 88, - /* 1920 */ 233, 232, 1088, 151, 18, 234, 1081, 250, 333, 1209, - /* 1930 */ 185, 486, 236, 186, 37, 841, 491, 354, 240, 346, - /* 1940 */ 495, 187, 90, 869, 19, 20, 500, 503, 349, 92, - /* 1950 */ 167, 152, 296, 882, 93, 510, 94, 1159, 153, 1041, - /* 1960 */ 1128, 39, 214, 269, 1127, 271, 249, 952, 190, 947, - /* 1970 */ 110, 1149, 21, 7, 1153, 22, 1145, 23, 1147, 24, - /* 1980 */ 1133, 25, 1152, 33, 539, 193, 26, 1056, 98, 1042, - /* 1990 */ 1040, 1044, 1098, 1045, 1097, 256, 255, 28, 40, 257, - /* 2000 */ 1010, 851, 109, 29, 913, 559, 384, 387, 258, 1167, - /* 2010 */ 1166, 1225, 
1225, 1225, 1579, 1225, 1225, 1225, 1225, 1225, - /* 2020 */ 1225, 1225, 1578, + /* 0 */ 566, 1307, 566, 1286, 201, 201, 566, 116, 112, 222, + /* 10 */ 566, 1307, 377, 566, 116, 112, 222, 397, 408, 409, + /* 20 */ 1260, 378, 1269, 41, 41, 41, 41, 1412, 1517, 71, + /* 30 */ 71, 967, 1258, 41, 41, 491, 71, 71, 272, 968, + /* 40 */ 298, 476, 298, 123, 124, 114, 1210, 1210, 1044, 1047, + /* 50 */ 1036, 1036, 121, 121, 122, 122, 122, 122, 543, 409, + /* 60 */ 1234, 1, 1, 573, 2, 1238, 548, 116, 112, 222, + /* 70 */ 309, 480, 142, 548, 1272, 524, 116, 112, 222, 1320, + /* 80 */ 417, 523, 547, 123, 124, 114, 1210, 1210, 1044, 1047, + /* 90 */ 1036, 1036, 121, 121, 122, 122, 122, 122, 424, 116, + /* 100 */ 112, 222, 120, 120, 120, 120, 119, 119, 118, 118, + /* 110 */ 118, 117, 113, 444, 277, 277, 277, 277, 560, 560, + /* 120 */ 560, 1558, 376, 1560, 1186, 375, 1157, 563, 1157, 563, + /* 130 */ 409, 1558, 537, 252, 219, 1553, 99, 141, 449, 6, + /* 140 */ 365, 233, 120, 120, 120, 120, 119, 119, 118, 118, + /* 150 */ 118, 117, 113, 444, 123, 124, 114, 1210, 1210, 1044, + /* 160 */ 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, 138, + /* 170 */ 289, 1186, 1546, 448, 118, 118, 118, 117, 113, 444, + /* 180 */ 125, 1186, 1187, 1188, 144, 465, 334, 566, 150, 127, + /* 190 */ 444, 122, 122, 122, 122, 115, 120, 120, 120, 120, + /* 200 */ 119, 119, 118, 118, 118, 117, 113, 444, 454, 419, + /* 210 */ 13, 13, 215, 120, 120, 120, 120, 119, 119, 118, + /* 220 */ 118, 118, 117, 113, 444, 422, 308, 557, 1186, 1187, + /* 230 */ 1188, 441, 440, 409, 1271, 122, 122, 122, 122, 120, + /* 240 */ 120, 120, 120, 119, 119, 118, 118, 118, 117, 113, + /* 250 */ 444, 1543, 98, 1033, 1033, 1045, 1048, 123, 124, 114, + /* 260 */ 1210, 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, + /* 270 */ 122, 122, 566, 406, 405, 1186, 566, 409, 1217, 319, + /* 280 */ 1217, 80, 81, 120, 120, 120, 120, 119, 119, 118, + /* 290 */ 118, 118, 117, 113, 444, 70, 70, 1186, 1604, 71, + /* 300 */ 71, 123, 124, 114, 1210, 1210, 1044, 1047, 1036, 1036, + /* 310 */ 121, 121, 122, 122, 122, 122, 120, 120, 120, 120, + /* 320 */ 119, 119, 118, 118, 118, 117, 113, 444, 1037, 210, + /* 330 */ 1186, 365, 1186, 1187, 1188, 245, 548, 399, 504, 501, + /* 340 */ 500, 108, 558, 138, 4, 516, 933, 433, 499, 217, + /* 350 */ 514, 522, 352, 879, 1186, 1187, 1188, 383, 561, 566, + /* 360 */ 120, 120, 120, 120, 119, 119, 118, 118, 118, 117, + /* 370 */ 113, 444, 277, 277, 16, 16, 1598, 441, 440, 153, + /* 380 */ 409, 445, 13, 13, 1279, 563, 1214, 1186, 1187, 1188, + /* 390 */ 1003, 1216, 264, 555, 1574, 186, 566, 427, 138, 1215, + /* 400 */ 308, 557, 472, 138, 123, 124, 114, 1210, 1210, 1044, + /* 410 */ 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, 55, + /* 420 */ 55, 413, 1023, 507, 1217, 1186, 1217, 474, 106, 106, + /* 430 */ 1312, 1312, 1186, 171, 566, 384, 107, 380, 445, 568, + /* 440 */ 567, 430, 1543, 1013, 332, 549, 565, 263, 280, 360, + /* 450 */ 510, 355, 509, 250, 491, 308, 557, 71, 71, 351, + /* 460 */ 308, 557, 374, 120, 120, 120, 120, 119, 119, 118, + /* 470 */ 118, 118, 117, 113, 444, 1013, 1013, 1015, 1016, 27, + /* 480 */ 277, 277, 1186, 1187, 1188, 1152, 566, 528, 409, 1186, + /* 490 */ 1187, 1188, 348, 563, 548, 1260, 533, 517, 1152, 1516, + /* 500 */ 317, 1152, 285, 550, 485, 569, 566, 569, 482, 51, + /* 510 */ 51, 207, 123, 124, 114, 1210, 1210, 1044, 1047, 1036, + /* 520 */ 1036, 121, 121, 122, 122, 122, 122, 171, 1412, 13, + /* 530 */ 13, 409, 277, 277, 1186, 505, 119, 119, 118, 118, + /* 540 */ 118, 117, 113, 444, 429, 563, 518, 220, 515, 
1552, + /* 550 */ 365, 546, 1186, 6, 532, 123, 124, 114, 1210, 1210, + /* 560 */ 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, + /* 570 */ 145, 120, 120, 120, 120, 119, 119, 118, 118, 118, + /* 580 */ 117, 113, 444, 245, 566, 474, 504, 501, 500, 566, + /* 590 */ 1481, 1186, 1187, 1188, 1310, 1310, 499, 1186, 149, 425, + /* 600 */ 1186, 480, 409, 274, 365, 952, 872, 56, 56, 1186, + /* 610 */ 1187, 1188, 71, 71, 120, 120, 120, 120, 119, 119, + /* 620 */ 118, 118, 118, 117, 113, 444, 123, 124, 114, 1210, + /* 630 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 640 */ 122, 409, 541, 1552, 83, 865, 98, 6, 928, 529, + /* 650 */ 848, 543, 151, 927, 1186, 1187, 1188, 1186, 1187, 1188, + /* 660 */ 290, 1543, 187, 1633, 395, 123, 124, 114, 1210, 1210, + /* 670 */ 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, + /* 680 */ 566, 954, 566, 453, 953, 120, 120, 120, 120, 119, + /* 690 */ 119, 118, 118, 118, 117, 113, 444, 1152, 221, 1186, + /* 700 */ 331, 453, 452, 13, 13, 13, 13, 1003, 365, 463, + /* 710 */ 1152, 193, 409, 1152, 382, 1543, 1170, 32, 297, 474, + /* 720 */ 195, 1527, 5, 952, 120, 120, 120, 120, 119, 119, + /* 730 */ 118, 118, 118, 117, 113, 444, 123, 124, 114, 1210, + /* 740 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 750 */ 122, 409, 1067, 419, 1186, 1024, 1186, 1187, 1188, 1186, + /* 760 */ 419, 332, 460, 320, 544, 1545, 442, 442, 442, 566, + /* 770 */ 3, 117, 113, 444, 453, 123, 124, 114, 1210, 1210, + /* 780 */ 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, + /* 790 */ 1473, 566, 15, 15, 293, 120, 120, 120, 120, 119, + /* 800 */ 119, 118, 118, 118, 117, 113, 444, 1186, 566, 1486, + /* 810 */ 1412, 1186, 1187, 1188, 13, 13, 1186, 1187, 1188, 1544, + /* 820 */ 271, 271, 409, 286, 308, 557, 1008, 1486, 1488, 196, + /* 830 */ 288, 71, 71, 563, 120, 120, 120, 120, 119, 119, + /* 840 */ 118, 118, 118, 117, 113, 444, 123, 124, 114, 1210, + /* 850 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 860 */ 122, 409, 201, 1087, 1186, 1187, 1188, 1324, 304, 1529, + /* 870 */ 388, 278, 278, 450, 564, 402, 922, 922, 566, 563, + /* 880 */ 566, 426, 491, 480, 563, 123, 124, 114, 1210, 1210, + /* 890 */ 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, 122, + /* 900 */ 1486, 71, 71, 13, 13, 120, 120, 120, 120, 119, + /* 910 */ 119, 118, 118, 118, 117, 113, 444, 566, 545, 566, + /* 920 */ 1577, 573, 2, 1238, 1092, 1092, 488, 1480, 309, 1525, + /* 930 */ 142, 324, 409, 836, 837, 838, 312, 1320, 305, 363, + /* 940 */ 43, 43, 57, 57, 120, 120, 120, 120, 119, 119, + /* 950 */ 118, 118, 118, 117, 113, 444, 123, 124, 114, 1210, + /* 960 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 970 */ 122, 12, 277, 277, 566, 1152, 409, 572, 428, 1238, + /* 980 */ 465, 334, 296, 474, 309, 563, 142, 249, 1152, 308, + /* 990 */ 557, 1152, 321, 1320, 323, 491, 455, 71, 71, 233, + /* 1000 */ 283, 101, 114, 1210, 1210, 1044, 1047, 1036, 1036, 121, + /* 1010 */ 121, 122, 122, 122, 122, 120, 120, 120, 120, 119, + /* 1020 */ 119, 118, 118, 118, 117, 113, 444, 1108, 277, 277, + /* 1030 */ 1412, 448, 394, 1230, 439, 277, 277, 248, 247, 246, + /* 1040 */ 1319, 563, 1109, 313, 198, 294, 491, 1318, 563, 464, + /* 1050 */ 566, 1427, 394, 1130, 1023, 233, 414, 1110, 295, 120, + /* 1060 */ 120, 120, 120, 119, 119, 118, 118, 118, 117, 113, + /* 1070 */ 444, 1014, 104, 71, 71, 1013, 322, 496, 908, 566, + /* 1080 */ 277, 277, 277, 277, 1108, 1261, 415, 448, 909, 361, + /* 1090 */ 1571, 1315, 409, 563, 952, 563, 9, 202, 255, 1109, + /* 1100 */ 316, 487, 44, 
44, 249, 559, 415, 1013, 1013, 1015, + /* 1110 */ 443, 1231, 409, 1603, 1110, 897, 123, 124, 114, 1210, + /* 1120 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 1130 */ 122, 1231, 409, 1207, 215, 554, 123, 124, 114, 1210, + /* 1140 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 1150 */ 122, 1131, 1631, 470, 1631, 255, 123, 111, 114, 1210, + /* 1160 */ 1210, 1044, 1047, 1036, 1036, 121, 121, 122, 122, 122, + /* 1170 */ 122, 1131, 1632, 414, 1632, 120, 120, 120, 120, 119, + /* 1180 */ 119, 118, 118, 118, 117, 113, 444, 221, 209, 351, + /* 1190 */ 1207, 1207, 147, 1426, 491, 120, 120, 120, 120, 119, + /* 1200 */ 119, 118, 118, 118, 117, 113, 444, 1256, 539, 519, + /* 1210 */ 888, 551, 952, 12, 566, 120, 120, 120, 120, 119, + /* 1220 */ 119, 118, 118, 118, 117, 113, 444, 538, 566, 860, + /* 1230 */ 1129, 361, 1571, 346, 1356, 409, 1163, 58, 58, 339, + /* 1240 */ 1355, 508, 277, 277, 277, 277, 277, 277, 1207, 889, + /* 1250 */ 1129, 59, 59, 459, 363, 563, 566, 563, 96, 563, + /* 1260 */ 124, 114, 1210, 1210, 1044, 1047, 1036, 1036, 121, 121, + /* 1270 */ 122, 122, 122, 122, 566, 1412, 566, 281, 1186, 60, + /* 1280 */ 60, 110, 392, 392, 391, 266, 389, 860, 1163, 845, + /* 1290 */ 566, 481, 566, 436, 341, 1152, 344, 61, 61, 62, + /* 1300 */ 62, 967, 227, 1550, 315, 431, 540, 6, 1152, 968, + /* 1310 */ 566, 1152, 314, 45, 45, 46, 46, 512, 120, 120, + /* 1320 */ 120, 120, 119, 119, 118, 118, 118, 117, 113, 444, + /* 1330 */ 416, 173, 1532, 47, 47, 1186, 1187, 1188, 108, 558, + /* 1340 */ 325, 4, 229, 1551, 928, 566, 437, 6, 566, 927, + /* 1350 */ 164, 566, 1290, 137, 1190, 561, 566, 1549, 566, 1089, + /* 1360 */ 566, 6, 566, 1089, 531, 566, 868, 8, 49, 49, + /* 1370 */ 228, 50, 50, 566, 63, 63, 566, 457, 445, 64, + /* 1380 */ 64, 65, 65, 14, 14, 66, 66, 407, 129, 129, + /* 1390 */ 555, 566, 458, 566, 1505, 486, 67, 67, 566, 52, + /* 1400 */ 52, 546, 407, 467, 535, 410, 226, 1023, 566, 534, + /* 1410 */ 308, 557, 1190, 407, 68, 68, 69, 69, 566, 1023, + /* 1420 */ 566, 53, 53, 868, 1014, 106, 106, 525, 1013, 566, + /* 1430 */ 1504, 159, 159, 107, 451, 445, 568, 567, 471, 307, + /* 1440 */ 1013, 160, 160, 76, 76, 566, 1548, 466, 407, 407, + /* 1450 */ 6, 1225, 54, 54, 478, 276, 219, 566, 887, 886, + /* 1460 */ 1013, 1013, 1015, 84, 206, 1206, 230, 282, 72, 72, + /* 1470 */ 329, 483, 1013, 1013, 1015, 1016, 27, 1576, 1174, 447, + /* 1480 */ 130, 130, 281, 148, 105, 38, 103, 392, 392, 391, + /* 1490 */ 266, 389, 566, 1126, 845, 396, 566, 108, 558, 566, + /* 1500 */ 4, 311, 566, 30, 17, 566, 279, 227, 566, 315, + /* 1510 */ 108, 558, 468, 4, 561, 73, 73, 314, 566, 157, + /* 1520 */ 157, 566, 131, 131, 526, 132, 132, 561, 128, 128, + /* 1530 */ 566, 158, 158, 566, 31, 291, 566, 445, 330, 521, + /* 1540 */ 98, 152, 152, 420, 136, 136, 1005, 229, 254, 555, + /* 1550 */ 445, 479, 336, 135, 135, 164, 133, 133, 137, 134, + /* 1560 */ 134, 875, 555, 535, 566, 473, 566, 254, 536, 475, + /* 1570 */ 335, 254, 98, 894, 895, 228, 535, 566, 1023, 566, + /* 1580 */ 1074, 534, 210, 232, 106, 106, 1352, 75, 75, 77, + /* 1590 */ 77, 1023, 107, 340, 445, 568, 567, 106, 106, 1013, + /* 1600 */ 74, 74, 42, 42, 566, 107, 343, 445, 568, 567, + /* 1610 */ 410, 497, 1013, 251, 359, 308, 557, 1135, 349, 875, + /* 1620 */ 98, 1070, 345, 251, 358, 1591, 347, 48, 48, 1017, + /* 1630 */ 1303, 1013, 1013, 1015, 1016, 27, 1289, 1287, 1074, 451, + /* 1640 */ 961, 925, 254, 110, 1013, 1013, 1015, 1016, 27, 1174, + /* 1650 */ 447, 970, 971, 281, 108, 558, 1288, 4, 392, 392, + /* 1660 */ 391, 266, 389, 
1343, 1086, 845, 1086, 1085, 858, 1085, + /* 1670 */ 146, 561, 926, 354, 110, 303, 364, 553, 227, 1364, + /* 1680 */ 315, 108, 558, 1411, 4, 1339, 492, 1017, 314, 1350, + /* 1690 */ 1565, 552, 1417, 1268, 445, 204, 1259, 1247, 561, 1246, + /* 1700 */ 1248, 1584, 269, 1336, 367, 369, 555, 371, 11, 212, + /* 1710 */ 393, 225, 1393, 284, 1398, 456, 287, 327, 229, 328, + /* 1720 */ 292, 445, 1386, 216, 333, 1403, 164, 477, 373, 137, + /* 1730 */ 1402, 400, 502, 555, 1286, 1023, 357, 1477, 199, 1587, + /* 1740 */ 211, 106, 106, 932, 1476, 1225, 228, 556, 175, 107, + /* 1750 */ 200, 445, 568, 567, 258, 387, 1013, 1524, 1522, 223, + /* 1760 */ 1222, 418, 1023, 83, 208, 79, 82, 184, 106, 106, + /* 1770 */ 1482, 169, 177, 461, 179, 462, 107, 1399, 445, 568, + /* 1780 */ 567, 410, 180, 1013, 495, 181, 308, 557, 1013, 1013, + /* 1790 */ 1015, 1016, 27, 182, 35, 235, 100, 558, 398, 4, + /* 1800 */ 96, 1405, 1404, 36, 484, 469, 1407, 188, 401, 1471, + /* 1810 */ 451, 89, 1493, 561, 239, 1013, 1013, 1015, 1016, 27, + /* 1820 */ 490, 338, 270, 241, 192, 342, 493, 242, 403, 1249, + /* 1830 */ 243, 511, 432, 1297, 1306, 91, 445, 1305, 1304, 879, + /* 1840 */ 217, 434, 435, 1570, 1276, 1602, 520, 1601, 555, 301, + /* 1850 */ 527, 404, 1275, 302, 356, 1274, 1600, 95, 1347, 366, + /* 1860 */ 1296, 362, 1348, 368, 256, 257, 1556, 1555, 438, 1346, + /* 1870 */ 370, 126, 1345, 10, 1371, 546, 381, 1023, 102, 1457, + /* 1880 */ 97, 530, 34, 106, 106, 570, 1180, 372, 265, 1329, + /* 1890 */ 379, 107, 203, 445, 568, 567, 1328, 385, 1013, 1370, + /* 1900 */ 386, 267, 268, 571, 1244, 161, 1239, 162, 1509, 1510, + /* 1910 */ 1508, 143, 1507, 299, 832, 213, 214, 78, 446, 205, + /* 1920 */ 310, 306, 163, 224, 1084, 140, 1082, 318, 165, 176, + /* 1930 */ 1013, 1013, 1015, 1016, 27, 178, 1206, 231, 911, 234, + /* 1940 */ 326, 1098, 183, 421, 166, 167, 411, 185, 85, 423, + /* 1950 */ 412, 86, 174, 87, 168, 88, 1101, 236, 1097, 237, + /* 1960 */ 154, 18, 238, 254, 337, 1219, 489, 1090, 240, 190, + /* 1970 */ 37, 847, 189, 494, 358, 244, 350, 506, 191, 877, + /* 1980 */ 90, 498, 19, 20, 503, 92, 353, 890, 300, 170, + /* 1990 */ 155, 93, 513, 94, 1168, 156, 1050, 1137, 39, 218, + /* 2000 */ 273, 275, 1136, 960, 194, 955, 110, 1154, 1158, 253, + /* 2010 */ 7, 1162, 1156, 21, 22, 1161, 1142, 23, 24, 25, + /* 2020 */ 33, 542, 26, 260, 197, 98, 1065, 1051, 1049, 1053, + /* 2030 */ 1107, 1054, 1106, 259, 28, 40, 562, 1018, 859, 109, + /* 2040 */ 29, 921, 390, 1176, 172, 139, 1175, 1235, 261, 1235, + /* 2050 */ 1235, 1235, 1235, 1235, 1235, 1235, 1235, 262, 1235, 1235, + /* 2060 */ 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1235, 1593, 1592, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 192, 221, 192, 223, 192, 214, 272, 273, 274, 217, - /* 10 */ 192, 231, 217, 192, 192, 192, 272, 273, 274, 19, - /* 20 */ 233, 234, 214, 215, 214, 215, 203, 293, 203, 233, - /* 30 */ 234, 31, 214, 215, 214, 214, 215, 214, 215, 39, - /* 40 */ 208, 209, 210, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 236, 19, - /* 60 */ 237, 238, 237, 238, 272, 273, 274, 272, 273, 274, - /* 70 */ 192, 211, 251, 250, 251, 250, 26, 192, 200, 254, - /* 80 */ 255, 260, 204, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 192, 214, - /* 100 */ 215, 214, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 59, 229, 192, 294, 16, 306, 307, - /* 120 */ 312, 313, 312, 311, 314, 59, 86, 204, 88, 19, - /* 130 */ 312, 313, 272, 273, 274, 271, 26, 22, 54, 55, - /* 140 */ 56, 57, 102, 103, 104, 105, 106, 107, 
108, 109, - /* 150 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, - /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 53, 115, - /* 170 */ 116, 117, 118, 309, 310, 121, 122, 123, 77, 69, - /* 180 */ 79, 115, 116, 117, 59, 131, 102, 103, 104, 105, - /* 190 */ 106, 107, 108, 109, 110, 111, 112, 72, 148, 19, - /* 200 */ 54, 55, 56, 57, 58, 108, 109, 110, 111, 112, - /* 210 */ 304, 305, 102, 103, 104, 105, 106, 107, 108, 109, - /* 220 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, - /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 112, - /* 240 */ 115, 116, 117, 24, 208, 209, 210, 67, 102, 103, - /* 250 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 192, - /* 260 */ 59, 160, 43, 44, 45, 46, 47, 48, 49, 50, - /* 270 */ 51, 52, 53, 54, 55, 56, 57, 19, 46, 47, - /* 280 */ 48, 49, 102, 103, 104, 105, 106, 107, 108, 109, - /* 290 */ 110, 111, 112, 213, 73, 184, 185, 186, 187, 188, - /* 300 */ 189, 221, 81, 236, 46, 194, 192, 196, 19, 59, - /* 310 */ 133, 59, 135, 136, 203, 192, 115, 116, 117, 127, - /* 320 */ 128, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 330 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, - /* 340 */ 51, 52, 53, 54, 55, 56, 57, 126, 237, 238, - /* 350 */ 100, 150, 120, 230, 186, 187, 188, 189, 137, 138, - /* 360 */ 108, 250, 194, 26, 196, 115, 116, 115, 116, 117, - /* 370 */ 120, 203, 114, 164, 165, 264, 102, 103, 104, 105, - /* 380 */ 106, 107, 108, 109, 110, 111, 112, 192, 130, 111, - /* 390 */ 112, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 400 */ 111, 112, 152, 153, 154, 237, 238, 296, 192, 214, - /* 410 */ 215, 228, 192, 307, 192, 19, 59, 311, 250, 23, - /* 420 */ 22, 106, 107, 108, 109, 110, 111, 112, 192, 72, - /* 430 */ 214, 215, 264, 192, 214, 215, 214, 215, 149, 43, - /* 440 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 450 */ 54, 55, 56, 57, 117, 214, 215, 59, 19, 187, - /* 460 */ 192, 189, 23, 81, 296, 192, 194, 251, 196, 59, - /* 470 */ 229, 251, 115, 116, 117, 203, 260, 106, 107, 142, - /* 480 */ 260, 267, 43, 44, 45, 46, 47, 48, 49, 50, - /* 490 */ 51, 52, 53, 54, 55, 56, 57, 261, 102, 103, - /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 237, - /* 510 */ 238, 76, 192, 115, 116, 117, 144, 192, 76, 137, - /* 520 */ 138, 192, 250, 152, 89, 154, 116, 92, 19, 87, - /* 530 */ 262, 89, 23, 22, 92, 163, 264, 192, 22, 214, - /* 540 */ 215, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 550 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, - /* 560 */ 51, 52, 53, 54, 55, 56, 57, 19, 296, 118, - /* 570 */ 59, 23, 121, 122, 123, 59, 251, 26, 46, 306, - /* 580 */ 307, 261, 131, 192, 311, 192, 144, 192, 22, 203, - /* 590 */ 100, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 600 */ 52, 53, 54, 55, 56, 57, 116, 214, 215, 271, - /* 610 */ 120, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 620 */ 111, 112, 229, 237, 238, 59, 115, 116, 117, 299, - /* 630 */ 300, 115, 116, 117, 59, 16, 250, 19, 192, 192, - /* 640 */ 19, 23, 152, 153, 154, 24, 114, 309, 310, 192, - /* 650 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 660 */ 112, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 670 */ 52, 53, 54, 55, 56, 57, 19, 7, 8, 9, - /* 680 */ 23, 115, 116, 117, 203, 290, 239, 238, 137, 138, - /* 690 */ 115, 116, 117, 236, 192, 22, 77, 81, 79, 250, - /* 700 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 710 */ 53, 54, 55, 56, 57, 192, 95, 142, 237, 238, - /* 720 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 730 */ 112, 250, 59, 112, 192, 119, 26, 214, 215, 118, - /* 740 */ 119, 120, 121, 122, 123, 124, 19, 192, 267, 302, - /* 750 */ 23, 130, 229, 137, 138, 23, 214, 215, 26, 
102, - /* 760 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 770 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 780 */ 53, 54, 55, 56, 57, 19, 76, 11, 115, 116, - /* 790 */ 117, 192, 29, 251, 239, 73, 33, 192, 192, 89, - /* 800 */ 192, 192, 92, 192, 192, 126, 127, 128, 224, 43, - /* 810 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 820 */ 54, 55, 56, 57, 192, 35, 214, 215, 65, 102, - /* 830 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 840 */ 59, 229, 192, 192, 239, 239, 214, 215, 126, 127, - /* 850 */ 128, 126, 127, 128, 307, 19, 66, 302, 311, 192, - /* 860 */ 261, 229, 224, 22, 74, 214, 215, 192, 102, 103, - /* 870 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 43, - /* 880 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 890 */ 54, 55, 56, 57, 192, 192, 115, 116, 117, 19, - /* 900 */ 59, 290, 251, 127, 128, 192, 23, 302, 302, 26, - /* 910 */ 302, 236, 192, 22, 21, 24, 214, 215, 192, 129, - /* 920 */ 22, 192, 24, 142, 158, 45, 46, 47, 48, 49, - /* 930 */ 50, 51, 52, 53, 54, 55, 56, 57, 102, 103, - /* 940 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 192, - /* 950 */ 59, 12, 192, 251, 192, 305, 192, 116, 22, 23, - /* 960 */ 242, 203, 26, 203, 24, 236, 27, 237, 238, 266, - /* 970 */ 252, 214, 215, 80, 214, 215, 214, 215, 214, 215, - /* 980 */ 250, 42, 102, 103, 104, 105, 106, 107, 108, 109, - /* 990 */ 110, 111, 112, 229, 158, 237, 238, 237, 238, 59, - /* 1000 */ 117, 281, 63, 192, 192, 192, 192, 116, 250, 192, - /* 1010 */ 250, 251, 73, 251, 19, 122, 290, 237, 238, 24, - /* 1020 */ 260, 209, 210, 209, 210, 142, 242, 214, 215, 197, - /* 1030 */ 250, 22, 23, 276, 19, 26, 252, 101, 43, 44, - /* 1040 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - /* 1050 */ 55, 56, 57, 160, 19, 211, 116, 192, 43, 44, - /* 1060 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - /* 1070 */ 55, 56, 57, 192, 192, 22, 192, 266, 43, 44, - /* 1080 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - /* 1090 */ 55, 56, 57, 192, 282, 192, 282, 102, 103, 104, - /* 1100 */ 105, 106, 107, 108, 109, 110, 111, 112, 59, 192, - /* 1110 */ 101, 279, 192, 192, 230, 283, 192, 102, 103, 104, - /* 1120 */ 105, 106, 107, 108, 109, 110, 111, 112, 204, 211, - /* 1130 */ 66, 214, 215, 289, 214, 215, 108, 102, 103, 104, - /* 1140 */ 105, 106, 107, 108, 109, 110, 111, 112, 266, 85, - /* 1150 */ 226, 192, 228, 22, 23, 106, 107, 19, 94, 106, - /* 1160 */ 107, 192, 134, 114, 115, 116, 117, 139, 119, 266, - /* 1170 */ 203, 206, 207, 214, 215, 192, 127, 192, 206, 207, - /* 1180 */ 59, 192, 44, 45, 46, 47, 48, 49, 50, 51, - /* 1190 */ 52, 53, 54, 55, 56, 57, 192, 76, 192, 214, - /* 1200 */ 215, 152, 284, 154, 237, 238, 192, 289, 87, 145, - /* 1210 */ 89, 19, 20, 92, 22, 22, 23, 250, 307, 236, - /* 1220 */ 214, 215, 311, 203, 12, 247, 192, 249, 36, 307, - /* 1230 */ 192, 262, 101, 311, 137, 138, 115, 116, 117, 27, - /* 1240 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 1250 */ 112, 59, 214, 215, 42, 203, 307, 237, 238, 192, - /* 1260 */ 311, 192, 26, 71, 192, 144, 262, 297, 298, 203, - /* 1270 */ 250, 19, 20, 81, 22, 63, 262, 254, 255, 15, - /* 1280 */ 26, 214, 215, 214, 215, 26, 214, 215, 36, 237, - /* 1290 */ 238, 192, 100, 114, 101, 192, 262, 192, 106, 107, - /* 1300 */ 48, 134, 250, 237, 238, 113, 139, 115, 116, 117, - /* 1310 */ 192, 59, 120, 214, 215, 242, 250, 214, 215, 214, - /* 1320 */ 215, 148, 149, 71, 60, 252, 242, 192, 149, 137, - /* 1330 */ 138, 192, 214, 215, 192, 19, 252, 85, 192, 59, - /* 1340 */ 192, 157, 90, 159, 152, 153, 154, 155, 156, 214, - /* 1350 */ 215, 192, 100, 214, 215, 19, 214, 215, 106, 
107, - /* 1360 */ 214, 215, 214, 215, 22, 113, 192, 115, 116, 117, - /* 1370 */ 192, 242, 120, 214, 215, 192, 24, 192, 31, 192, - /* 1380 */ 144, 252, 26, 192, 125, 99, 39, 192, 214, 215, - /* 1390 */ 192, 59, 214, 215, 192, 141, 116, 214, 215, 214, - /* 1400 */ 215, 214, 215, 61, 152, 153, 154, 155, 156, 0, - /* 1410 */ 1, 2, 214, 215, 5, 192, 214, 215, 132, 10, - /* 1420 */ 11, 12, 13, 14, 1, 2, 17, 192, 5, 19, - /* 1430 */ 20, 115, 22, 10, 11, 12, 13, 14, 192, 30, - /* 1440 */ 17, 32, 23, 192, 23, 26, 36, 26, 116, 40, - /* 1450 */ 192, 115, 192, 30, 192, 32, 119, 120, 59, 5, - /* 1460 */ 214, 215, 128, 40, 10, 11, 12, 13, 14, 59, - /* 1470 */ 19, 17, 214, 215, 214, 215, 214, 215, 120, 70, - /* 1480 */ 192, 71, 22, 192, 30, 151, 32, 78, 130, 128, - /* 1490 */ 81, 192, 140, 70, 40, 85, 192, 141, 7, 8, - /* 1500 */ 90, 78, 214, 215, 81, 214, 215, 98, 83, 84, - /* 1510 */ 100, 192, 151, 214, 215, 116, 106, 107, 214, 215, - /* 1520 */ 192, 98, 192, 113, 70, 115, 116, 117, 23, 224, - /* 1530 */ 120, 26, 78, 214, 215, 81, 19, 20, 152, 22, - /* 1540 */ 154, 132, 214, 215, 214, 215, 137, 138, 97, 192, - /* 1550 */ 256, 192, 98, 36, 23, 132, 192, 26, 192, 192, - /* 1560 */ 137, 138, 152, 153, 154, 155, 156, 192, 192, 192, - /* 1570 */ 161, 214, 215, 214, 215, 192, 59, 192, 214, 215, - /* 1580 */ 214, 215, 192, 152, 161, 154, 132, 192, 71, 214, - /* 1590 */ 215, 137, 138, 192, 192, 192, 19, 20, 192, 22, - /* 1600 */ 140, 253, 85, 192, 214, 215, 192, 90, 23, 214, - /* 1610 */ 215, 26, 192, 36, 192, 161, 23, 100, 192, 26, - /* 1620 */ 214, 215, 192, 106, 107, 214, 215, 192, 23, 192, - /* 1630 */ 113, 26, 115, 116, 117, 23, 59, 120, 26, 23, - /* 1640 */ 23, 23, 26, 26, 26, 316, 234, 23, 71, 23, - /* 1650 */ 26, 192, 26, 192, 192, 192, 192, 192, 192, 192, - /* 1660 */ 192, 253, 212, 190, 286, 285, 253, 240, 253, 152, - /* 1670 */ 153, 154, 155, 156, 241, 243, 295, 100, 291, 291, - /* 1680 */ 223, 253, 227, 106, 107, 108, 269, 244, 244, 265, - /* 1690 */ 113, 257, 115, 116, 117, 257, 243, 120, 269, 218, - /* 1700 */ 217, 265, 217, 19, 20, 217, 22, 195, 269, 269, - /* 1710 */ 60, 295, 140, 257, 243, 241, 247, 247, 199, 278, - /* 1720 */ 36, 199, 199, 38, 19, 20, 150, 22, 149, 152, - /* 1730 */ 153, 154, 155, 156, 22, 43, 232, 295, 292, 292, - /* 1740 */ 18, 36, 235, 59, 268, 270, 235, 199, 235, 235, - /* 1750 */ 18, 198, 148, 281, 244, 71, 270, 270, 268, 244, - /* 1760 */ 232, 232, 244, 199, 59, 244, 198, 157, 288, 62, - /* 1770 */ 199, 287, 198, 22, 219, 219, 71, 199, 198, 114, - /* 1780 */ 199, 198, 216, 216, 100, 216, 225, 64, 22, 125, - /* 1790 */ 106, 107, 222, 164, 219, 222, 24, 113, 216, 115, - /* 1800 */ 116, 117, 218, 216, 120, 100, 216, 216, 310, 303, - /* 1810 */ 112, 106, 107, 225, 280, 280, 219, 143, 113, 259, - /* 1820 */ 115, 116, 117, 114, 259, 120, 199, 91, 82, 147, - /* 1830 */ 144, 315, 22, 275, 199, 315, 152, 153, 154, 155, - /* 1840 */ 156, 146, 145, 247, 258, 157, 25, 258, 245, 248, - /* 1850 */ 244, 259, 258, 202, 259, 248, 258, 152, 153, 154, - /* 1860 */ 155, 156, 263, 263, 26, 246, 13, 201, 193, 193, - /* 1870 */ 6, 191, 191, 205, 191, 220, 220, 205, 211, 277, - /* 1880 */ 211, 211, 211, 205, 4, 212, 3, 22, 162, 212, - /* 1890 */ 211, 15, 23, 16, 23, 138, 129, 150, 26, 24, - /* 1900 */ 141, 20, 16, 143, 1, 141, 129, 129, 61, 301, - /* 1910 */ 301, 298, 150, 53, 53, 37, 53, 129, 115, 53, - /* 1920 */ 140, 34, 1, 5, 22, 114, 68, 26, 160, 75, - /* 1930 */ 68, 41, 140, 114, 24, 20, 19, 130, 124, 23, - /* 1940 */ 67, 22, 22, 59, 22, 22, 67, 96, 24, 22, - /* 1950 */ 37, 23, 67, 28, 
148, 22, 26, 23, 23, 23, - /* 1960 */ 23, 22, 140, 23, 97, 23, 34, 115, 22, 142, - /* 1970 */ 26, 75, 34, 44, 75, 34, 88, 34, 86, 34, - /* 1980 */ 23, 34, 93, 22, 24, 26, 34, 23, 26, 23, - /* 1990 */ 23, 23, 23, 11, 23, 22, 26, 22, 22, 140, - /* 2000 */ 23, 23, 22, 22, 134, 26, 23, 15, 140, 1, - /* 2010 */ 1, 317, 317, 317, 140, 317, 317, 317, 317, 317, - /* 2020 */ 317, 317, 140, 317, 317, 317, 317, 317, 317, 317, - /* 2030 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2040 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2050 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2060 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2070 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2080 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2090 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2100 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2110 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2120 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2130 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2140 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2150 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2160 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2170 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2180 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2190 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, - /* 2200 */ 317, 317, 317, 317, 317, 317, 317, + /* 0 */ 193, 223, 193, 225, 193, 193, 193, 274, 275, 276, + /* 10 */ 193, 233, 219, 193, 274, 275, 276, 206, 206, 19, + /* 20 */ 193, 219, 216, 216, 217, 216, 217, 193, 295, 216, + /* 30 */ 217, 31, 205, 216, 217, 193, 216, 217, 213, 39, + /* 40 */ 228, 193, 230, 43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 193, 19, + /* 60 */ 185, 186, 187, 188, 189, 190, 253, 274, 275, 276, + /* 70 */ 195, 193, 197, 253, 216, 262, 274, 275, 276, 204, + /* 80 */ 238, 204, 262, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 264, 274, + /* 100 */ 275, 276, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 239, 240, 239, 240, 210, 211, + /* 120 */ 212, 314, 315, 314, 59, 316, 86, 252, 88, 252, + /* 130 */ 19, 314, 315, 256, 257, 309, 25, 72, 296, 313, + /* 140 */ 193, 266, 102, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, + /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 81, + /* 170 */ 292, 59, 307, 298, 108, 109, 110, 111, 112, 113, + /* 180 */ 69, 116, 117, 118, 72, 128, 129, 193, 241, 22, + /* 190 */ 113, 54, 55, 56, 57, 58, 102, 103, 104, 105, + /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 193, + /* 210 */ 216, 217, 25, 102, 103, 104, 105, 106, 107, 108, + /* 220 */ 109, 110, 111, 112, 113, 231, 138, 139, 116, 117, + /* 230 */ 118, 106, 107, 19, 216, 54, 55, 56, 57, 102, + /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 250 */ 113, 304, 25, 46, 47, 48, 49, 43, 44, 45, + /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + /* 270 */ 56, 57, 193, 106, 107, 59, 193, 19, 153, 263, + /* 280 */ 155, 67, 24, 102, 103, 104, 105, 106, 107, 108, + /* 290 */ 109, 110, 111, 112, 113, 216, 217, 59, 230, 216, + /* 300 */ 217, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, + /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 142, + /* 330 */ 59, 193, 116, 117, 118, 119, 253, 204, 122, 123, + /* 340 */ 124, 19, 20, 81, 
22, 262, 108, 19, 132, 165, + /* 350 */ 166, 193, 24, 126, 116, 117, 118, 278, 36, 193, + /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 370 */ 112, 113, 239, 240, 216, 217, 215, 106, 107, 241, + /* 380 */ 19, 59, 216, 217, 223, 252, 115, 116, 117, 118, + /* 390 */ 73, 120, 26, 71, 193, 22, 193, 231, 81, 128, + /* 400 */ 138, 139, 269, 81, 43, 44, 45, 46, 47, 48, + /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 216, + /* 420 */ 217, 198, 100, 95, 153, 59, 155, 193, 106, 107, + /* 430 */ 235, 236, 59, 193, 193, 249, 114, 251, 116, 117, + /* 440 */ 118, 113, 304, 121, 127, 204, 193, 119, 120, 121, + /* 450 */ 122, 123, 124, 125, 193, 138, 139, 216, 217, 131, + /* 460 */ 138, 139, 193, 102, 103, 104, 105, 106, 107, 108, + /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, + /* 480 */ 239, 240, 116, 117, 118, 76, 193, 193, 19, 116, + /* 490 */ 117, 118, 23, 252, 253, 193, 87, 204, 89, 238, + /* 500 */ 193, 92, 268, 262, 281, 203, 193, 205, 285, 216, + /* 510 */ 217, 150, 43, 44, 45, 46, 47, 48, 49, 50, + /* 520 */ 51, 52, 53, 54, 55, 56, 57, 193, 193, 216, + /* 530 */ 217, 19, 239, 240, 59, 23, 106, 107, 108, 109, + /* 540 */ 110, 111, 112, 113, 231, 252, 253, 193, 308, 309, + /* 550 */ 193, 145, 59, 313, 145, 43, 44, 45, 46, 47, + /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 570 */ 164, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 580 */ 111, 112, 113, 119, 193, 193, 122, 123, 124, 193, + /* 590 */ 283, 116, 117, 118, 235, 236, 132, 59, 241, 264, + /* 600 */ 59, 193, 19, 23, 193, 25, 23, 216, 217, 116, + /* 610 */ 117, 118, 216, 217, 102, 103, 104, 105, 106, 107, + /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, + /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 640 */ 57, 19, 308, 309, 151, 23, 25, 313, 135, 253, + /* 650 */ 21, 193, 241, 140, 116, 117, 118, 116, 117, 118, + /* 660 */ 268, 304, 22, 301, 302, 43, 44, 45, 46, 47, + /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 680 */ 193, 143, 193, 193, 143, 102, 103, 104, 105, 106, + /* 690 */ 107, 108, 109, 110, 111, 112, 113, 76, 118, 59, + /* 700 */ 292, 211, 212, 216, 217, 216, 217, 73, 193, 80, + /* 710 */ 89, 25, 19, 92, 193, 304, 23, 22, 231, 193, + /* 720 */ 231, 193, 22, 143, 102, 103, 104, 105, 106, 107, + /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, + /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 750 */ 57, 19, 123, 193, 59, 23, 116, 117, 118, 59, + /* 760 */ 193, 127, 128, 129, 306, 307, 210, 211, 212, 193, + /* 770 */ 22, 111, 112, 113, 284, 43, 44, 45, 46, 47, + /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 790 */ 161, 193, 216, 217, 268, 102, 103, 104, 105, 106, + /* 800 */ 107, 108, 109, 110, 111, 112, 113, 59, 193, 193, + /* 810 */ 193, 116, 117, 118, 216, 217, 116, 117, 118, 304, + /* 820 */ 239, 240, 19, 263, 138, 139, 23, 211, 212, 231, + /* 830 */ 263, 216, 217, 252, 102, 103, 104, 105, 106, 107, + /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, + /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 860 */ 57, 19, 193, 11, 116, 117, 118, 240, 253, 193, + /* 870 */ 201, 239, 240, 193, 134, 206, 136, 137, 193, 252, + /* 880 */ 193, 264, 193, 193, 252, 43, 44, 45, 46, 47, + /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 900 */ 284, 216, 217, 216, 217, 102, 103, 104, 105, 106, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 193, 231, 193, + /* 920 */ 187, 188, 189, 190, 127, 128, 129, 238, 195, 193, + /* 930 */ 197, 16, 19, 7, 8, 9, 193, 204, 253, 193, + /* 940 */ 216, 217, 216, 217, 102, 103, 104, 105, 106, 107, + /* 
950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, + /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 970 */ 57, 213, 239, 240, 193, 76, 19, 188, 232, 190, + /* 980 */ 128, 129, 292, 193, 195, 252, 197, 46, 89, 138, + /* 990 */ 139, 92, 77, 204, 79, 193, 269, 216, 217, 266, + /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, + /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, + /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, + /* 1030 */ 193, 298, 22, 23, 253, 239, 240, 127, 128, 129, + /* 1040 */ 238, 252, 27, 193, 286, 204, 193, 204, 252, 291, + /* 1050 */ 193, 273, 22, 23, 100, 266, 115, 42, 268, 102, + /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1070 */ 113, 117, 159, 216, 217, 121, 161, 19, 63, 193, + /* 1080 */ 239, 240, 239, 240, 12, 208, 209, 298, 73, 311, + /* 1090 */ 312, 238, 19, 252, 25, 252, 22, 24, 24, 27, + /* 1100 */ 193, 264, 216, 217, 46, 208, 209, 153, 154, 155, + /* 1110 */ 253, 101, 19, 23, 42, 25, 43, 44, 45, 46, + /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1130 */ 57, 101, 19, 59, 25, 63, 43, 44, 45, 46, + /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1150 */ 57, 22, 23, 115, 25, 24, 43, 44, 45, 46, + /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1170 */ 57, 22, 23, 115, 25, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 118, 150, 131, + /* 1190 */ 59, 117, 22, 273, 193, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 66, 204, + /* 1210 */ 35, 204, 143, 213, 193, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 85, 193, 59, + /* 1230 */ 101, 311, 312, 16, 193, 19, 94, 216, 217, 238, + /* 1240 */ 193, 66, 239, 240, 239, 240, 239, 240, 117, 74, + /* 1250 */ 101, 216, 217, 193, 193, 252, 193, 252, 149, 252, + /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 1270 */ 54, 55, 56, 57, 193, 193, 193, 5, 59, 216, + /* 1280 */ 217, 25, 10, 11, 12, 13, 14, 117, 146, 17, + /* 1290 */ 193, 291, 193, 232, 77, 76, 79, 216, 217, 216, + /* 1300 */ 217, 31, 30, 309, 32, 130, 87, 313, 89, 39, + /* 1310 */ 193, 92, 40, 216, 217, 216, 217, 108, 102, 103, + /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 1330 */ 299, 300, 193, 216, 217, 116, 117, 118, 19, 20, + /* 1340 */ 193, 22, 70, 309, 135, 193, 264, 313, 193, 140, + /* 1350 */ 78, 193, 226, 81, 59, 36, 193, 309, 193, 29, + /* 1360 */ 193, 313, 193, 33, 145, 193, 59, 48, 216, 217, + /* 1370 */ 98, 216, 217, 193, 216, 217, 193, 244, 59, 216, + /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 254, 216, 217, + /* 1390 */ 71, 193, 244, 193, 193, 65, 216, 217, 193, 216, + /* 1400 */ 217, 145, 254, 244, 85, 133, 15, 100, 193, 90, + /* 1410 */ 138, 139, 117, 254, 216, 217, 216, 217, 193, 100, + /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 19, 121, 193, + /* 1430 */ 193, 216, 217, 114, 162, 116, 117, 118, 244, 244, + /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 129, 254, 254, + /* 1450 */ 313, 60, 216, 217, 19, 256, 257, 193, 120, 121, + /* 1460 */ 153, 154, 155, 149, 150, 25, 24, 99, 216, 217, + /* 1470 */ 152, 193, 153, 154, 155, 156, 157, 0, 1, 2, + /* 1480 */ 216, 217, 5, 22, 158, 24, 160, 10, 11, 12, + /* 1490 */ 13, 14, 193, 23, 17, 25, 193, 19, 20, 193, + /* 1500 */ 22, 133, 193, 22, 22, 193, 22, 30, 193, 32, + /* 1510 */ 19, 20, 129, 22, 36, 216, 217, 40, 193, 216, + /* 1520 */ 217, 193, 216, 217, 116, 216, 217, 36, 216, 217, + /* 1530 */ 193, 216, 217, 193, 53, 152, 193, 59, 23, 19, + /* 1540 */ 25, 216, 217, 61, 216, 217, 23, 70, 25, 71, + /* 1550 
*/ 59, 116, 193, 216, 217, 78, 216, 217, 81, 216, + /* 1560 */ 217, 59, 71, 85, 193, 23, 193, 25, 90, 23, + /* 1570 */ 23, 25, 25, 7, 8, 98, 85, 193, 100, 193, + /* 1580 */ 59, 90, 142, 141, 106, 107, 193, 216, 217, 216, + /* 1590 */ 217, 100, 114, 193, 116, 117, 118, 106, 107, 121, + /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, + /* 1610 */ 133, 23, 121, 25, 121, 138, 139, 97, 23, 117, + /* 1620 */ 25, 23, 193, 25, 131, 141, 193, 216, 217, 59, + /* 1630 */ 193, 153, 154, 155, 156, 157, 226, 193, 117, 162, + /* 1640 */ 23, 23, 25, 25, 153, 154, 155, 156, 157, 1, + /* 1650 */ 2, 83, 84, 5, 19, 20, 226, 22, 10, 11, + /* 1660 */ 12, 13, 14, 258, 153, 17, 155, 153, 23, 155, + /* 1670 */ 25, 36, 23, 193, 25, 255, 193, 236, 30, 193, + /* 1680 */ 32, 19, 20, 193, 22, 193, 288, 117, 40, 193, + /* 1690 */ 318, 193, 193, 193, 59, 242, 193, 193, 36, 193, + /* 1700 */ 193, 193, 287, 255, 255, 255, 71, 255, 243, 214, + /* 1710 */ 191, 297, 267, 245, 271, 259, 259, 293, 70, 246, + /* 1720 */ 246, 59, 267, 229, 245, 271, 78, 293, 259, 81, + /* 1730 */ 271, 271, 220, 71, 225, 100, 219, 219, 249, 196, + /* 1740 */ 243, 106, 107, 108, 219, 60, 98, 280, 297, 114, + /* 1750 */ 249, 116, 117, 118, 141, 245, 121, 200, 200, 297, + /* 1760 */ 38, 200, 100, 151, 150, 294, 294, 22, 106, 107, + /* 1770 */ 283, 43, 234, 18, 237, 200, 114, 272, 116, 117, + /* 1780 */ 118, 133, 237, 121, 18, 237, 138, 139, 153, 154, + /* 1790 */ 155, 156, 157, 237, 270, 199, 19, 20, 246, 22, + /* 1800 */ 149, 272, 272, 270, 200, 246, 234, 234, 246, 246, + /* 1810 */ 162, 158, 290, 36, 199, 153, 154, 155, 156, 157, + /* 1820 */ 62, 289, 200, 199, 22, 200, 221, 199, 221, 200, + /* 1830 */ 199, 115, 64, 227, 218, 22, 59, 218, 218, 126, + /* 1840 */ 165, 24, 113, 312, 218, 224, 305, 224, 71, 282, + /* 1850 */ 144, 221, 220, 282, 218, 218, 218, 115, 261, 260, + /* 1860 */ 227, 221, 261, 260, 200, 91, 317, 317, 82, 261, + /* 1870 */ 260, 148, 261, 22, 265, 145, 200, 100, 158, 277, + /* 1880 */ 147, 146, 25, 106, 107, 202, 13, 260, 194, 250, + /* 1890 */ 249, 114, 248, 116, 117, 118, 250, 247, 121, 265, + /* 1900 */ 246, 194, 6, 192, 192, 207, 192, 207, 213, 213, + /* 1910 */ 213, 222, 213, 222, 4, 214, 214, 213, 3, 22, + /* 1920 */ 163, 279, 207, 15, 23, 16, 23, 139, 130, 151, + /* 1930 */ 153, 154, 155, 156, 157, 142, 25, 24, 20, 144, + /* 1940 */ 16, 1, 142, 61, 130, 130, 303, 151, 53, 37, + /* 1950 */ 303, 53, 300, 53, 130, 53, 116, 34, 1, 141, + /* 1960 */ 5, 22, 115, 25, 161, 75, 41, 68, 141, 115, + /* 1970 */ 24, 20, 68, 19, 131, 125, 23, 96, 22, 59, + /* 1980 */ 22, 67, 22, 22, 67, 22, 24, 28, 67, 37, + /* 1990 */ 23, 149, 22, 25, 23, 23, 23, 23, 22, 141, + /* 2000 */ 23, 23, 97, 116, 22, 143, 25, 88, 75, 34, + /* 2010 */ 44, 75, 86, 34, 34, 93, 23, 34, 34, 34, + /* 2020 */ 22, 24, 34, 22, 25, 25, 23, 23, 23, 23, + /* 2030 */ 23, 11, 23, 25, 22, 22, 25, 23, 23, 22, + /* 2040 */ 22, 135, 15, 1, 25, 23, 1, 319, 141, 319, + /* 2050 */ 319, 319, 319, 319, 319, 319, 319, 141, 319, 319, + /* 2060 */ 319, 319, 319, 319, 319, 319, 319, 319, 141, 141, + /* 2070 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2090 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2140 */ 319, 319, 319, 319, 319, 319, 
319, 319, 319, 319, + /* 2150 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2160 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2170 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2180 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2190 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2200 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2210 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2220 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2230 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + /* 2250 */ 319, 319, 319, 319, 319, }; -#define YY_SHIFT_COUNT (569) +#define YY_SHIFT_COUNT (573) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2009) +#define YY_SHIFT_MAX (2045) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1423, 1409, 1454, 1192, 1192, 382, 1252, 1410, 1517, 1684, - /* 10 */ 1684, 1684, 221, 0, 0, 180, 1015, 1684, 1684, 1684, - /* 20 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, - /* 30 */ 1049, 1049, 1121, 1121, 54, 616, 382, 382, 382, 382, - /* 40 */ 382, 40, 110, 219, 289, 396, 439, 509, 548, 618, - /* 50 */ 657, 727, 766, 836, 995, 1015, 1015, 1015, 1015, 1015, - /* 60 */ 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, - /* 70 */ 1015, 1015, 1015, 1035, 1015, 1138, 880, 880, 1577, 1684, - /* 80 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, - /* 90 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, - /* 100 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, - /* 110 */ 1684, 1684, 1684, 1705, 1684, 1684, 1684, 1684, 1684, 1684, - /* 120 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 146, 84, 84, - /* 130 */ 84, 84, 84, 274, 315, 125, 97, 357, 66, 66, - /* 140 */ 893, 258, 66, 66, 371, 371, 66, 551, 551, 551, - /* 150 */ 551, 192, 209, 209, 278, 127, 2023, 2023, 621, 621, - /* 160 */ 621, 201, 398, 398, 398, 398, 939, 939, 442, 936, - /* 170 */ 1009, 66, 66, 66, 66, 66, 66, 66, 66, 66, - /* 180 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, - /* 190 */ 66, 710, 710, 66, 776, 435, 435, 410, 410, 372, - /* 200 */ 1097, 2023, 2023, 2023, 2023, 2023, 2023, 2023, 250, 490, - /* 210 */ 490, 511, 451, 516, 252, 566, 575, 781, 673, 66, - /* 220 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 722, - /* 230 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, - /* 240 */ 66, 66, 790, 790, 790, 66, 66, 66, 883, 66, - /* 250 */ 66, 66, 891, 1064, 66, 66, 1212, 66, 66, 66, - /* 260 */ 66, 66, 66, 66, 66, 725, 763, 177, 940, 940, - /* 270 */ 940, 940, 337, 177, 177, 1028, 1053, 670, 1264, 1179, - /* 280 */ 1173, 1254, 1316, 1173, 1316, 1336, 50, 1179, 1179, 50, - /* 290 */ 1179, 1254, 1336, 1259, 732, 532, 1347, 1347, 1347, 1316, - /* 300 */ 1236, 1236, 1184, 1356, 1167, 898, 1650, 1650, 1572, 1572, - /* 310 */ 1685, 1685, 1572, 1576, 1579, 1712, 1692, 1722, 1722, 1722, - /* 320 */ 1722, 1572, 1732, 1604, 1579, 1579, 1604, 1712, 1692, 1604, - /* 330 */ 1692, 1604, 1572, 1732, 1610, 1707, 1572, 1732, 1751, 1572, - /* 340 */ 1732, 1572, 1732, 1751, 1665, 1665, 1665, 1723, 1766, 1766, - /* 350 */ 1751, 1665, 1664, 1665, 1723, 1665, 1665, 1629, 1772, 1698, - /* 360 */ 1698, 1751, 1674, 1709, 1674, 1709, 1674, 1709, 1674, 1709, - /* 370 */ 1572, 1736, 1736, 1746, 1746, 1682, 1686, 1810, 1572, 1688, - /* 380 */ 1682, 1695, 1697, 1604, 1821, 1838, 1853, 1853, 1864, 1864, - /* 390 */ 1864, 2023, 2023, 2023, 2023, 2023, 2023, 2023, 2023, 2023, - /* 400 */ 2023, 2023, 2023, 2023, 2023, 
2023, 232, 101, 1131, 1193, - /* 410 */ 619, 679, 841, 1421, 1286, 115, 1352, 1334, 1361, 1419, - /* 420 */ 1342, 1505, 1531, 1585, 1593, 1605, 1612, 1280, 1337, 1491, - /* 430 */ 1358, 1451, 1332, 1616, 1617, 1425, 1618, 1386, 1431, 1624, - /* 440 */ 1626, 1399, 1460, 1880, 1883, 1865, 1726, 1876, 1877, 1869, - /* 450 */ 1871, 1757, 1747, 1767, 1872, 1872, 1875, 1759, 1881, 1760, - /* 460 */ 1886, 1903, 1764, 1777, 1872, 1778, 1847, 1878, 1872, 1762, - /* 470 */ 1860, 1861, 1863, 1866, 1788, 1803, 1887, 1780, 1921, 1918, - /* 480 */ 1902, 1811, 1768, 1858, 1901, 1862, 1854, 1890, 1792, 1819, - /* 490 */ 1910, 1915, 1917, 1807, 1814, 1919, 1873, 1920, 1922, 1916, - /* 500 */ 1923, 1879, 1884, 1924, 1851, 1925, 1927, 1885, 1913, 1928, - /* 510 */ 1806, 1933, 1934, 1935, 1936, 1930, 1937, 1939, 1867, 1822, - /* 520 */ 1940, 1942, 1852, 1932, 1946, 1827, 1944, 1938, 1941, 1943, - /* 530 */ 1945, 1888, 1896, 1892, 1929, 1899, 1889, 1947, 1957, 1961, - /* 540 */ 1960, 1959, 1962, 1952, 1964, 1944, 1966, 1967, 1968, 1969, - /* 550 */ 1970, 1971, 1973, 1982, 1975, 1976, 1977, 1978, 1980, 1981, - /* 560 */ 1979, 1870, 1859, 1868, 1874, 1882, 1983, 1992, 2008, 2009, + /* 0 */ 1648, 1477, 1272, 322, 322, 262, 1319, 1478, 1491, 1662, + /* 10 */ 1662, 1662, 317, 0, 0, 214, 1093, 1662, 1662, 1662, + /* 20 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, + /* 30 */ 271, 271, 1219, 1219, 216, 88, 262, 262, 262, 262, + /* 40 */ 262, 40, 111, 258, 361, 469, 512, 583, 622, 693, + /* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, + /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, + /* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662, + /* 80 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, + /* 90 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, + /* 100 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, + /* 110 */ 1662, 1662, 1662, 1662, 1777, 1662, 1662, 1662, 1662, 1662, + /* 120 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 137, 181, + /* 130 */ 181, 181, 181, 181, 94, 430, 66, 65, 112, 366, + /* 140 */ 475, 475, 629, 1058, 475, 475, 125, 125, 475, 686, + /* 150 */ 686, 686, 660, 686, 57, 184, 184, 77, 77, 2070, + /* 160 */ 2070, 328, 328, 328, 493, 373, 373, 373, 373, 1015, + /* 170 */ 1015, 409, 366, 1129, 1149, 475, 475, 475, 475, 475, + /* 180 */ 475, 475, 475, 475, 475, 475, 475, 475, 475, 475, + /* 190 */ 475, 475, 475, 475, 475, 621, 621, 475, 852, 899, + /* 200 */ 899, 1295, 1295, 406, 851, 2070, 2070, 2070, 2070, 2070, + /* 210 */ 2070, 2070, 1307, 954, 954, 640, 464, 695, 238, 700, + /* 220 */ 538, 541, 748, 475, 475, 475, 475, 475, 475, 475, + /* 230 */ 475, 475, 475, 634, 475, 475, 475, 475, 475, 475, + /* 240 */ 475, 475, 475, 475, 475, 475, 1175, 1175, 1175, 475, + /* 250 */ 475, 475, 580, 475, 475, 475, 1074, 1142, 475, 475, + /* 260 */ 1072, 475, 475, 475, 475, 475, 475, 475, 475, 797, + /* 270 */ 1330, 740, 1131, 1131, 1131, 1131, 1069, 740, 740, 1209, + /* 280 */ 167, 926, 1391, 1038, 1314, 187, 1408, 1314, 1408, 1435, + /* 290 */ 1109, 1038, 1038, 1109, 1038, 187, 1435, 227, 1090, 941, + /* 300 */ 1270, 1270, 1270, 1408, 1256, 1256, 1326, 1440, 513, 1461, + /* 310 */ 1685, 1685, 1613, 1613, 1722, 1722, 1613, 1612, 1614, 1745, + /* 320 */ 1728, 1755, 1755, 1755, 1755, 1613, 1766, 1651, 1614, 1614, + /* 330 */ 1651, 1745, 1728, 1651, 1728, 1651, 1613, 1766, 1653, 1758, + /* 340 */ 1613, 1766, 1802, 1613, 1766, 1613, 1766, 1802, 1716, 1716, + /* 350 */ 1716, 1768, 1813, 1813, 1802, 
1716, 1713, 1716, 1768, 1716, + /* 360 */ 1716, 1675, 1817, 1729, 1729, 1802, 1706, 1742, 1706, 1742, + /* 370 */ 1706, 1742, 1706, 1742, 1613, 1774, 1774, 1786, 1786, 1723, + /* 380 */ 1730, 1851, 1613, 1720, 1723, 1733, 1735, 1651, 1857, 1873, + /* 390 */ 1873, 1896, 1896, 1896, 2070, 2070, 2070, 2070, 2070, 2070, + /* 400 */ 2070, 2070, 2070, 2070, 2070, 2070, 2070, 2070, 2070, 207, + /* 410 */ 915, 1010, 1030, 1217, 910, 1170, 1470, 1368, 1481, 1442, + /* 420 */ 1318, 1383, 1515, 1482, 1523, 1542, 1546, 1547, 1588, 1595, + /* 430 */ 1502, 1338, 1566, 1493, 1520, 1521, 1598, 1617, 1568, 1618, + /* 440 */ 1511, 1514, 1645, 1649, 1570, 1484, 1910, 1915, 1897, 1757, + /* 450 */ 1908, 1909, 1901, 1903, 1788, 1778, 1798, 1911, 1911, 1913, + /* 460 */ 1793, 1918, 1795, 1924, 1940, 1800, 1814, 1911, 1815, 1882, + /* 470 */ 1912, 1911, 1796, 1895, 1898, 1900, 1902, 1824, 1840, 1923, + /* 480 */ 1818, 1957, 1955, 1939, 1847, 1803, 1899, 1938, 1904, 1890, + /* 490 */ 1925, 1827, 1854, 1946, 1951, 1954, 1843, 1850, 1956, 1914, + /* 500 */ 1958, 1960, 1953, 1961, 1917, 1920, 1962, 1881, 1959, 1963, + /* 510 */ 1921, 1952, 1967, 1842, 1970, 1971, 1972, 1973, 1968, 1974, + /* 520 */ 1976, 1905, 1858, 1977, 1978, 1887, 1975, 1982, 1862, 1981, + /* 530 */ 1979, 1980, 1983, 1984, 1919, 1933, 1926, 1966, 1936, 1922, + /* 540 */ 1985, 1993, 1998, 1997, 1999, 2000, 1988, 2003, 1981, 2004, + /* 550 */ 2005, 2006, 2007, 2008, 2009, 2001, 2020, 2012, 2013, 2014, + /* 560 */ 2015, 2017, 2018, 2011, 1906, 1907, 1916, 1927, 1928, 2019, + /* 570 */ 2022, 2027, 2042, 2045, }; -#define YY_REDUCE_COUNT (405) -#define YY_REDUCE_MIN (-266) -#define YY_REDUCE_MAX (1683) +#define YY_REDUCE_COUNT (408) +#define YY_REDUCE_MIN (-267) +#define YY_REDUCE_MAX (1715) static const short yy_reduce_ofst[] = { - /* 0 */ 111, 168, 272, 760, -177, -175, -192, -190, -182, -179, - /* 10 */ 216, 220, 481, -208, -205, -266, -140, -115, 241, 393, - /* 20 */ 523, 325, 612, 632, 542, 651, 764, 757, 702, 762, - /* 30 */ 812, 814, -188, 273, 924, 386, 758, 967, 1020, 1052, - /* 40 */ 1066, -256, -256, -256, -256, -256, -256, -256, -256, -256, - /* 50 */ -256, -256, -256, -256, -256, -256, -256, -256, -256, -256, - /* 60 */ -256, -256, -256, -256, -256, -256, -256, -256, -256, -256, - /* 70 */ -256, -256, -256, -256, -256, -256, -256, -256, 195, 222, - /* 80 */ 813, 917, 920, 959, 985, 1006, 1038, 1067, 1069, 1072, - /* 90 */ 1099, 1103, 1105, 1118, 1135, 1139, 1142, 1146, 1148, 1159, - /* 100 */ 1174, 1178, 1183, 1185, 1187, 1198, 1202, 1246, 1258, 1260, - /* 110 */ 1262, 1288, 1291, 1299, 1304, 1319, 1328, 1330, 1357, 1359, - /* 120 */ 1364, 1366, 1375, 1390, 1395, 1406, 1411, -256, -256, -256, - /* 130 */ -256, -256, -256, -256, -256, 447, -256, 555, -178, 605, - /* 140 */ 832, -220, 606, -94, -168, 36, -122, 730, 780, 730, - /* 150 */ 780, 918, -136, 338, -256, -256, -256, -256, 80, 80, - /* 160 */ 80, 720, 703, 811, 882, 903, -213, -204, 106, 330, - /* 170 */ 330, -77, 236, 320, 599, 67, 457, 675, 729, 395, - /* 180 */ 268, 611, 969, 1004, 726, 1014, 983, 123, 884, 608, - /* 190 */ 1034, 547, 911, 650, 844, 922, 949, 965, 972, 978, - /* 200 */ 449, 970, 718, 784, 1073, 1084, 1023, 1129, -209, -180, - /* 210 */ -113, 114, 183, 329, 345, 391, 446, 502, 609, 667, - /* 220 */ 713, 817, 865, 881, 901, 921, 989, 1191, 1195, 214, - /* 230 */ 1223, 1235, 1251, 1367, 1376, 1377, 1383, 1385, 1401, 1402, - /* 240 */ 1403, 1414, 584, 638, 1305, 1420, 1422, 1426, 1294, 1430, - /* 250 */ 1435, 1437, 1348, 1329, 1459, 1461, 1412, 1462, 345, 1463, - /* 
260 */ 1464, 1465, 1466, 1467, 1468, 1378, 1380, 1427, 1408, 1413, - /* 270 */ 1415, 1428, 1294, 1427, 1427, 1433, 1450, 1473, 1381, 1417, - /* 280 */ 1424, 1432, 1434, 1436, 1438, 1387, 1443, 1429, 1439, 1444, - /* 290 */ 1440, 1453, 1388, 1481, 1455, 1457, 1483, 1485, 1488, 1456, - /* 300 */ 1469, 1470, 1441, 1471, 1474, 1512, 1416, 1442, 1519, 1522, - /* 310 */ 1446, 1447, 1523, 1472, 1475, 1476, 1504, 1507, 1511, 1513, - /* 320 */ 1514, 1548, 1553, 1510, 1486, 1487, 1515, 1490, 1528, 1518, - /* 330 */ 1529, 1521, 1564, 1568, 1480, 1484, 1571, 1574, 1555, 1578, - /* 340 */ 1580, 1581, 1583, 1556, 1566, 1567, 1569, 1561, 1570, 1573, - /* 350 */ 1575, 1582, 1584, 1587, 1588, 1590, 1591, 1498, 1506, 1534, - /* 360 */ 1535, 1597, 1560, 1586, 1565, 1589, 1592, 1594, 1595, 1598, - /* 370 */ 1627, 1516, 1520, 1599, 1600, 1601, 1596, 1558, 1635, 1602, - /* 380 */ 1607, 1619, 1603, 1606, 1651, 1666, 1675, 1676, 1680, 1681, - /* 390 */ 1683, 1608, 1609, 1613, 1668, 1667, 1669, 1670, 1671, 1672, - /* 400 */ 1655, 1656, 1673, 1677, 1679, 1678, + /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, + /* 10 */ -180, 83, 133, -207, -198, -267, -175, -6, 166, 313, + /* 20 */ 487, 396, 489, 598, 615, 685, 687, 79, 781, 857, + /* 30 */ 490, 616, 240, 334, -188, 796, 841, 843, 1003, 1005, + /* 40 */ 1007, -260, -260, -260, -260, -260, -260, -260, -260, -260, + /* 50 */ -260, -260, -260, -260, -260, -260, -260, -260, -260, -260, + /* 60 */ -260, -260, -260, -260, -260, -260, -260, -260, -260, -260, + /* 70 */ -260, -260, -260, -260, -260, -260, -260, -260, 158, 203, + /* 80 */ 391, 576, 724, 726, 886, 1021, 1035, 1063, 1081, 1083, + /* 90 */ 1097, 1099, 1117, 1152, 1155, 1158, 1163, 1165, 1167, 1169, + /* 100 */ 1172, 1180, 1183, 1198, 1200, 1205, 1215, 1225, 1227, 1236, + /* 110 */ 1252, 1264, 1299, 1303, 1306, 1309, 1312, 1315, 1325, 1328, + /* 120 */ 1337, 1340, 1343, 1371, 1373, 1384, 1386, 1411, -260, -260, + /* 130 */ -260, -260, -260, -260, -260, -260, -260, -53, 138, 302, + /* 140 */ -158, 357, 223, -222, 411, 458, -92, 556, 669, 581, + /* 150 */ 632, 581, -260, 632, 758, 778, 920, -260, -260, -260, + /* 160 */ -260, 161, 161, 161, 307, 234, 392, 526, 790, 195, + /* 170 */ 359, -174, -173, 362, 362, -189, 16, 560, 567, 261, + /* 180 */ 689, 802, 853, -122, -166, 408, 335, 617, 690, 837, + /* 190 */ 1001, 746, 1061, 515, 1082, 994, 1034, -135, 1000, 1048, + /* 200 */ 1137, 877, 897, 186, 627, 1031, 1133, 1148, 1159, 1194, + /* 210 */ 1199, 1195, -194, -142, 18, -152, 68, 201, 253, 269, + /* 220 */ 294, 354, 521, 528, 676, 680, 736, 743, 850, 907, + /* 230 */ 1041, 1047, 1060, 727, 1139, 1147, 1201, 1237, 1278, 1359, + /* 240 */ 1393, 1400, 1413, 1429, 1433, 1437, 1126, 1410, 1430, 1444, + /* 250 */ 1480, 1483, 1405, 1486, 1490, 1492, 1420, 1372, 1496, 1498, + /* 260 */ 1441, 1499, 253, 1500, 1503, 1504, 1506, 1507, 1508, 1398, + /* 270 */ 1415, 1453, 1448, 1449, 1450, 1452, 1405, 1453, 1453, 1465, + /* 280 */ 1495, 1519, 1414, 1443, 1445, 1468, 1456, 1455, 1457, 1424, + /* 290 */ 1473, 1454, 1459, 1474, 1460, 1479, 1434, 1512, 1494, 1509, + /* 300 */ 1517, 1518, 1525, 1469, 1489, 1501, 1467, 1510, 1497, 1543, + /* 310 */ 1451, 1462, 1557, 1558, 1471, 1472, 1561, 1487, 1505, 1524, + /* 320 */ 1538, 1537, 1545, 1548, 1556, 1575, 1596, 1552, 1529, 1530, + /* 330 */ 1559, 1533, 1572, 1562, 1573, 1563, 1604, 1615, 1522, 1532, + /* 340 */ 1622, 1624, 1605, 1625, 1628, 1629, 1631, 1607, 1616, 1619, + /* 350 */ 1620, 1606, 1621, 1623, 1630, 1626, 1632, 1636, 1633, 1637, + /* 360 */ 1638, 1531, 
1541, 1567, 1571, 1640, 1597, 1599, 1601, 1603, + /* 370 */ 1608, 1610, 1611, 1627, 1664, 1549, 1550, 1609, 1634, 1639, + /* 380 */ 1641, 1602, 1676, 1642, 1646, 1644, 1650, 1654, 1683, 1694, + /* 390 */ 1707, 1711, 1712, 1714, 1643, 1647, 1652, 1698, 1695, 1696, + /* 400 */ 1697, 1699, 1700, 1689, 1691, 1701, 1702, 1704, 1715, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1623, 1623, 1623, 1453, 1223, 1332, 1223, 1223, 1223, 1453, - /* 10 */ 1453, 1453, 1223, 1362, 1362, 1506, 1254, 1223, 1223, 1223, - /* 20 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1452, 1223, 1223, - /* 30 */ 1223, 1223, 1541, 1541, 1223, 1223, 1223, 1223, 1223, 1223, - /* 40 */ 1223, 1223, 1371, 1223, 1378, 1223, 1223, 1223, 1223, 1223, - /* 50 */ 1454, 1455, 1223, 1223, 1223, 1505, 1507, 1470, 1385, 1384, - /* 60 */ 1383, 1382, 1488, 1349, 1376, 1369, 1373, 1448, 1449, 1447, - /* 70 */ 1451, 1455, 1454, 1223, 1372, 1419, 1433, 1418, 1223, 1223, - /* 80 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 90 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 100 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 110 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 120 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1427, 1432, 1438, - /* 130 */ 1431, 1428, 1421, 1420, 1422, 1223, 1423, 1223, 1223, 1223, - /* 140 */ 1244, 1296, 1223, 1223, 1223, 1223, 1223, 1525, 1524, 1223, - /* 150 */ 1223, 1254, 1413, 1412, 1424, 1425, 1435, 1434, 1513, 1576, - /* 160 */ 1575, 1471, 1223, 1223, 1223, 1223, 1223, 1223, 1541, 1223, - /* 170 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 180 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 190 */ 1223, 1541, 1541, 1223, 1254, 1541, 1541, 1250, 1250, 1356, - /* 200 */ 1223, 1520, 1323, 1323, 1323, 1323, 1332, 1323, 1223, 1223, - /* 210 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 220 */ 1223, 1223, 1223, 1510, 1508, 1223, 1223, 1223, 1223, 1223, - /* 230 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 240 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 250 */ 1223, 1223, 1328, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 260 */ 1223, 1223, 1223, 1223, 1570, 1223, 1483, 1310, 1328, 1328, - /* 270 */ 1328, 1328, 1330, 1311, 1309, 1322, 1255, 1230, 1615, 1388, - /* 280 */ 1377, 1329, 1351, 1377, 1351, 1612, 1375, 1388, 1388, 1375, - /* 290 */ 1388, 1329, 1612, 1271, 1592, 1266, 1362, 1362, 1362, 1351, - /* 300 */ 1356, 1356, 1450, 1329, 1322, 1223, 1615, 1615, 1337, 1337, - /* 310 */ 1614, 1614, 1337, 1471, 1599, 1397, 1299, 1305, 1305, 1305, - /* 320 */ 1305, 1337, 1241, 1375, 1599, 1599, 1375, 1397, 1299, 1375, - /* 330 */ 1299, 1375, 1337, 1241, 1487, 1609, 1337, 1241, 1461, 1337, - /* 340 */ 1241, 1337, 1241, 1461, 1297, 1297, 1297, 1286, 1223, 1223, - /* 350 */ 1461, 1297, 1271, 1297, 1286, 1297, 1297, 1559, 1223, 1465, - /* 360 */ 1465, 1461, 1355, 1350, 1355, 1350, 1355, 1350, 1355, 1350, - /* 370 */ 1337, 1551, 1551, 1365, 1365, 1370, 1356, 1456, 1337, 1223, - /* 380 */ 1370, 1368, 1366, 1375, 1247, 1289, 1573, 1573, 1569, 1569, - /* 390 */ 1569, 1620, 1620, 1520, 1585, 1254, 1254, 1254, 1254, 1585, - /* 400 */ 1273, 1273, 1255, 1255, 1254, 1585, 1223, 1223, 1223, 1223, - /* 410 */ 1223, 1223, 1580, 1223, 1515, 1472, 1341, 1223, 1223, 1223, - /* 420 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 430 */ 1223, 1526, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 440 */ 1223, 
1223, 1402, 1223, 1226, 1517, 1223, 1223, 1223, 1223, - /* 450 */ 1223, 1223, 1223, 1223, 1379, 1380, 1342, 1223, 1223, 1223, - /* 460 */ 1223, 1223, 1223, 1223, 1394, 1223, 1223, 1223, 1389, 1223, - /* 470 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1611, 1223, 1223, - /* 480 */ 1223, 1223, 1223, 1223, 1486, 1485, 1223, 1223, 1339, 1223, - /* 490 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 500 */ 1223, 1223, 1269, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 510 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 520 */ 1223, 1223, 1223, 1223, 1223, 1223, 1367, 1223, 1223, 1223, - /* 530 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 540 */ 1223, 1556, 1357, 1223, 1223, 1602, 1223, 1223, 1223, 1223, - /* 550 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, - /* 560 */ 1596, 1313, 1404, 1223, 1403, 1407, 1223, 1235, 1223, 1223, + /* 0 */ 1637, 1637, 1637, 1466, 1233, 1344, 1233, 1233, 1233, 1466, + /* 10 */ 1466, 1466, 1233, 1374, 1374, 1519, 1266, 1233, 1233, 1233, + /* 20 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1465, 1233, 1233, + /* 30 */ 1233, 1233, 1554, 1554, 1233, 1233, 1233, 1233, 1233, 1233, + /* 40 */ 1233, 1233, 1383, 1233, 1390, 1233, 1233, 1233, 1233, 1233, + /* 50 */ 1467, 1468, 1233, 1233, 1233, 1518, 1520, 1483, 1397, 1396, + /* 60 */ 1395, 1394, 1501, 1361, 1388, 1381, 1385, 1461, 1462, 1460, + /* 70 */ 1464, 1468, 1467, 1233, 1384, 1431, 1445, 1430, 1233, 1233, + /* 80 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 90 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 100 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 110 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 120 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1439, 1444, + /* 130 */ 1451, 1443, 1440, 1433, 1432, 1434, 1435, 1233, 1233, 1257, + /* 140 */ 1233, 1233, 1254, 1308, 1233, 1233, 1233, 1233, 1233, 1538, + /* 150 */ 1537, 1233, 1436, 1233, 1266, 1425, 1424, 1448, 1437, 1447, + /* 160 */ 1446, 1526, 1590, 1589, 1484, 1233, 1233, 1233, 1233, 1233, + /* 170 */ 1233, 1554, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 180 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 190 */ 1233, 1233, 1233, 1233, 1233, 1554, 1554, 1233, 1266, 1554, + /* 200 */ 1554, 1262, 1262, 1368, 1233, 1533, 1335, 1335, 1335, 1335, + /* 210 */ 1344, 1335, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 220 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1523, 1521, 1233, + /* 230 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 240 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 250 */ 1233, 1233, 1233, 1233, 1233, 1233, 1340, 1233, 1233, 1233, + /* 260 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1583, 1233, + /* 270 */ 1496, 1322, 1340, 1340, 1340, 1340, 1342, 1323, 1321, 1334, + /* 280 */ 1267, 1240, 1629, 1400, 1389, 1341, 1363, 1389, 1363, 1626, + /* 290 */ 1387, 1400, 1400, 1387, 1400, 1341, 1626, 1283, 1606, 1278, + /* 300 */ 1374, 1374, 1374, 1363, 1368, 1368, 1463, 1341, 1334, 1233, + /* 310 */ 1629, 1629, 1349, 1349, 1628, 1628, 1349, 1484, 1613, 1409, + /* 320 */ 1311, 1317, 1317, 1317, 1317, 1349, 1251, 1387, 1613, 1613, + /* 330 */ 1387, 1409, 1311, 1387, 1311, 1387, 1349, 1251, 1500, 1623, + /* 340 */ 1349, 1251, 1474, 1349, 1251, 1349, 1251, 1474, 1309, 1309, + /* 350 */ 1309, 1298, 1233, 1233, 1474, 1309, 1283, 1309, 1298, 1309, + /* 360 */ 1309, 1572, 1233, 1478, 1478, 1474, 1367, 
1362, 1367, 1362, + /* 370 */ 1367, 1362, 1367, 1362, 1349, 1564, 1564, 1377, 1377, 1382, + /* 380 */ 1368, 1469, 1349, 1233, 1382, 1380, 1378, 1387, 1301, 1586, + /* 390 */ 1586, 1582, 1582, 1582, 1634, 1634, 1533, 1599, 1266, 1266, + /* 400 */ 1266, 1266, 1599, 1285, 1285, 1267, 1267, 1266, 1599, 1233, + /* 410 */ 1233, 1233, 1233, 1233, 1233, 1594, 1233, 1528, 1485, 1353, + /* 420 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 430 */ 1233, 1233, 1233, 1233, 1539, 1233, 1233, 1233, 1233, 1233, + /* 440 */ 1233, 1233, 1233, 1233, 1233, 1414, 1233, 1236, 1530, 1233, + /* 450 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1391, 1392, 1354, + /* 460 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1406, 1233, 1233, + /* 470 */ 1233, 1401, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 480 */ 1625, 1233, 1233, 1233, 1233, 1233, 1233, 1499, 1498, 1233, + /* 490 */ 1233, 1351, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 500 */ 1233, 1233, 1233, 1233, 1233, 1281, 1233, 1233, 1233, 1233, + /* 510 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 520 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1379, + /* 530 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 540 */ 1233, 1233, 1233, 1233, 1569, 1369, 1233, 1233, 1616, 1233, + /* 550 */ 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, 1233, + /* 560 */ 1233, 1233, 1233, 1610, 1325, 1416, 1233, 1415, 1419, 1255, + /* 570 */ 1233, 1245, 1233, 1233, }; /********** End of lemon-generated parsing tables *****************************/ @@ -159224,8 +162641,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ - 59, /* WITHOUT => ID */ 0, /* COMMA => nothing */ + 59, /* WITHOUT => ID */ 59, /* ABORT => ID */ 59, /* ACTION => ID */ 59, /* AFTER => ID */ @@ -159311,6 +162728,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* SLASH => nothing */ 0, /* REM => nothing */ 0, /* CONCAT => nothing */ + 0, /* PTR => nothing */ 0, /* COLLATE => nothing */ 0, /* BITNOT => nothing */ 0, /* ON => nothing */ @@ -159434,9 +162852,9 @@ struct yyParser { }; typedef struct yyParser yyParser; +/* #include */ #ifndef NDEBUG /* #include */ -/* #include */ static FILE *yyTraceFILE = 0; static char *yyTracePrompt = 0; #endif /* NDEBUG */ @@ -159496,8 +162914,8 @@ static const char *const yyTokenName[] = { /* 22 */ "LP", /* 23 */ "RP", /* 24 */ "AS", - /* 25 */ "WITHOUT", - /* 26 */ "COMMA", + /* 25 */ "COMMA", + /* 26 */ "WITHOUT", /* 27 */ "ABORT", /* 28 */ "ACTION", /* 29 */ "AFTER", @@ -159583,211 +163001,213 @@ static const char *const yyTokenName[] = { /* 109 */ "SLASH", /* 110 */ "REM", /* 111 */ "CONCAT", - /* 112 */ "COLLATE", - /* 113 */ "BITNOT", - /* 114 */ "ON", - /* 115 */ "INDEXED", - /* 116 */ "STRING", - /* 117 */ "JOIN_KW", - /* 118 */ "CONSTRAINT", - /* 119 */ "DEFAULT", - /* 120 */ "NULL", - /* 121 */ "PRIMARY", - /* 122 */ "UNIQUE", - /* 123 */ "CHECK", - /* 124 */ "REFERENCES", - /* 125 */ "AUTOINCR", - /* 126 */ "INSERT", - /* 127 */ "DELETE", - /* 128 */ "UPDATE", - /* 129 */ "SET", - /* 130 */ "DEFERRABLE", - /* 131 */ "FOREIGN", - /* 132 */ "DROP", - /* 133 */ "UNION", - /* 134 */ "ALL", - /* 135 */ "EXCEPT", - /* 136 */ "INTERSECT", - /* 137 */ "SELECT", - /* 138 */ "VALUES", - /* 139 */ "DISTINCT", - /* 140 */ "DOT", - /* 141 */ "FROM", - /* 142 */ "JOIN", - /* 143 */ "USING", - /* 144 */ "ORDER", - /* 145 */ "GROUP", - /* 146 */ "HAVING", - /* 147 */ "LIMIT", - /* 148 */ "WHERE", - /* 149 */ "RETURNING", - /* 
150 */ "INTO", - /* 151 */ "NOTHING", - /* 152 */ "FLOAT", - /* 153 */ "BLOB", - /* 154 */ "INTEGER", - /* 155 */ "VARIABLE", - /* 156 */ "CASE", - /* 157 */ "WHEN", - /* 158 */ "THEN", - /* 159 */ "ELSE", - /* 160 */ "INDEX", - /* 161 */ "ALTER", - /* 162 */ "ADD", - /* 163 */ "WINDOW", - /* 164 */ "OVER", - /* 165 */ "FILTER", - /* 166 */ "COLUMN", - /* 167 */ "AGG_FUNCTION", - /* 168 */ "AGG_COLUMN", - /* 169 */ "TRUEFALSE", - /* 170 */ "ISNOT", - /* 171 */ "FUNCTION", - /* 172 */ "UMINUS", - /* 173 */ "UPLUS", - /* 174 */ "TRUTH", - /* 175 */ "REGISTER", - /* 176 */ "VECTOR", - /* 177 */ "SELECT_COLUMN", - /* 178 */ "IF_NULL_ROW", - /* 179 */ "ASTERISK", - /* 180 */ "SPAN", - /* 181 */ "ERROR", - /* 182 */ "SPACE", - /* 183 */ "ILLEGAL", - /* 184 */ "input", - /* 185 */ "cmdlist", - /* 186 */ "ecmd", - /* 187 */ "cmdx", - /* 188 */ "explain", - /* 189 */ "cmd", - /* 190 */ "transtype", - /* 191 */ "trans_opt", - /* 192 */ "nm", - /* 193 */ "savepoint_opt", - /* 194 */ "create_table", - /* 195 */ "create_table_args", - /* 196 */ "createkw", - /* 197 */ "temp", - /* 198 */ "ifnotexists", - /* 199 */ "dbnm", - /* 200 */ "columnlist", - /* 201 */ "conslist_opt", - /* 202 */ "table_options", - /* 203 */ "select", - /* 204 */ "columnname", - /* 205 */ "carglist", - /* 206 */ "typetoken", - /* 207 */ "typename", - /* 208 */ "signed", - /* 209 */ "plus_num", - /* 210 */ "minus_num", - /* 211 */ "scanpt", - /* 212 */ "scantok", - /* 213 */ "ccons", - /* 214 */ "term", - /* 215 */ "expr", - /* 216 */ "onconf", - /* 217 */ "sortorder", - /* 218 */ "autoinc", - /* 219 */ "eidlist_opt", - /* 220 */ "refargs", - /* 221 */ "defer_subclause", - /* 222 */ "generated", - /* 223 */ "refarg", - /* 224 */ "refact", - /* 225 */ "init_deferred_pred_opt", - /* 226 */ "conslist", - /* 227 */ "tconscomma", - /* 228 */ "tcons", - /* 229 */ "sortlist", - /* 230 */ "eidlist", - /* 231 */ "defer_subclause_opt", - /* 232 */ "orconf", - /* 233 */ "resolvetype", - /* 234 */ "raisetype", - /* 235 */ "ifexists", - /* 236 */ "fullname", - /* 237 */ "selectnowith", - /* 238 */ "oneselect", - /* 239 */ "wqlist", - /* 240 */ "multiselect_op", - /* 241 */ "distinct", - /* 242 */ "selcollist", - /* 243 */ "from", - /* 244 */ "where_opt", - /* 245 */ "groupby_opt", - /* 246 */ "having_opt", - /* 247 */ "orderby_opt", - /* 248 */ "limit_opt", - /* 249 */ "window_clause", - /* 250 */ "values", - /* 251 */ "nexprlist", - /* 252 */ "sclp", - /* 253 */ "as", - /* 254 */ "seltablist", - /* 255 */ "stl_prefix", - /* 256 */ "joinop", - /* 257 */ "indexed_opt", - /* 258 */ "on_opt", - /* 259 */ "using_opt", - /* 260 */ "exprlist", - /* 261 */ "xfullname", - /* 262 */ "idlist", - /* 263 */ "nulls", - /* 264 */ "with", - /* 265 */ "where_opt_ret", - /* 266 */ "setlist", - /* 267 */ "insert_cmd", - /* 268 */ "idlist_opt", - /* 269 */ "upsert", - /* 270 */ "returning", - /* 271 */ "filter_over", - /* 272 */ "likeop", - /* 273 */ "between_op", - /* 274 */ "in_op", - /* 275 */ "paren_exprlist", - /* 276 */ "case_operand", - /* 277 */ "case_exprlist", - /* 278 */ "case_else", - /* 279 */ "uniqueflag", - /* 280 */ "collate", - /* 281 */ "vinto", - /* 282 */ "nmnum", - /* 283 */ "trigger_decl", - /* 284 */ "trigger_cmd_list", - /* 285 */ "trigger_time", - /* 286 */ "trigger_event", - /* 287 */ "foreach_clause", - /* 288 */ "when_clause", - /* 289 */ "trigger_cmd", - /* 290 */ "trnm", - /* 291 */ "tridxby", - /* 292 */ "database_kw_opt", - /* 293 */ "key_opt", - /* 294 */ "add_column_fullname", - /* 295 */ "kwcolumn_opt", - /* 296 */ 
"create_vtab", - /* 297 */ "vtabarglist", - /* 298 */ "vtabarg", - /* 299 */ "vtabargtoken", - /* 300 */ "lp", - /* 301 */ "anylist", - /* 302 */ "wqitem", - /* 303 */ "wqas", - /* 304 */ "windowdefn_list", - /* 305 */ "windowdefn", - /* 306 */ "window", - /* 307 */ "frame_opt", - /* 308 */ "part_opt", - /* 309 */ "filter_clause", - /* 310 */ "over_clause", - /* 311 */ "range_or_rows", - /* 312 */ "frame_bound", - /* 313 */ "frame_bound_s", - /* 314 */ "frame_bound_e", - /* 315 */ "frame_exclude_opt", - /* 316 */ "frame_exclude", + /* 112 */ "PTR", + /* 113 */ "COLLATE", + /* 114 */ "BITNOT", + /* 115 */ "ON", + /* 116 */ "INDEXED", + /* 117 */ "STRING", + /* 118 */ "JOIN_KW", + /* 119 */ "CONSTRAINT", + /* 120 */ "DEFAULT", + /* 121 */ "NULL", + /* 122 */ "PRIMARY", + /* 123 */ "UNIQUE", + /* 124 */ "CHECK", + /* 125 */ "REFERENCES", + /* 126 */ "AUTOINCR", + /* 127 */ "INSERT", + /* 128 */ "DELETE", + /* 129 */ "UPDATE", + /* 130 */ "SET", + /* 131 */ "DEFERRABLE", + /* 132 */ "FOREIGN", + /* 133 */ "DROP", + /* 134 */ "UNION", + /* 135 */ "ALL", + /* 136 */ "EXCEPT", + /* 137 */ "INTERSECT", + /* 138 */ "SELECT", + /* 139 */ "VALUES", + /* 140 */ "DISTINCT", + /* 141 */ "DOT", + /* 142 */ "FROM", + /* 143 */ "JOIN", + /* 144 */ "USING", + /* 145 */ "ORDER", + /* 146 */ "GROUP", + /* 147 */ "HAVING", + /* 148 */ "LIMIT", + /* 149 */ "WHERE", + /* 150 */ "RETURNING", + /* 151 */ "INTO", + /* 152 */ "NOTHING", + /* 153 */ "FLOAT", + /* 154 */ "BLOB", + /* 155 */ "INTEGER", + /* 156 */ "VARIABLE", + /* 157 */ "CASE", + /* 158 */ "WHEN", + /* 159 */ "THEN", + /* 160 */ "ELSE", + /* 161 */ "INDEX", + /* 162 */ "ALTER", + /* 163 */ "ADD", + /* 164 */ "WINDOW", + /* 165 */ "OVER", + /* 166 */ "FILTER", + /* 167 */ "COLUMN", + /* 168 */ "AGG_FUNCTION", + /* 169 */ "AGG_COLUMN", + /* 170 */ "TRUEFALSE", + /* 171 */ "ISNOT", + /* 172 */ "FUNCTION", + /* 173 */ "UMINUS", + /* 174 */ "UPLUS", + /* 175 */ "TRUTH", + /* 176 */ "REGISTER", + /* 177 */ "VECTOR", + /* 178 */ "SELECT_COLUMN", + /* 179 */ "IF_NULL_ROW", + /* 180 */ "ASTERISK", + /* 181 */ "SPAN", + /* 182 */ "ERROR", + /* 183 */ "SPACE", + /* 184 */ "ILLEGAL", + /* 185 */ "input", + /* 186 */ "cmdlist", + /* 187 */ "ecmd", + /* 188 */ "cmdx", + /* 189 */ "explain", + /* 190 */ "cmd", + /* 191 */ "transtype", + /* 192 */ "trans_opt", + /* 193 */ "nm", + /* 194 */ "savepoint_opt", + /* 195 */ "create_table", + /* 196 */ "create_table_args", + /* 197 */ "createkw", + /* 198 */ "temp", + /* 199 */ "ifnotexists", + /* 200 */ "dbnm", + /* 201 */ "columnlist", + /* 202 */ "conslist_opt", + /* 203 */ "table_option_set", + /* 204 */ "select", + /* 205 */ "table_option", + /* 206 */ "columnname", + /* 207 */ "carglist", + /* 208 */ "typetoken", + /* 209 */ "typename", + /* 210 */ "signed", + /* 211 */ "plus_num", + /* 212 */ "minus_num", + /* 213 */ "scanpt", + /* 214 */ "scantok", + /* 215 */ "ccons", + /* 216 */ "term", + /* 217 */ "expr", + /* 218 */ "onconf", + /* 219 */ "sortorder", + /* 220 */ "autoinc", + /* 221 */ "eidlist_opt", + /* 222 */ "refargs", + /* 223 */ "defer_subclause", + /* 224 */ "generated", + /* 225 */ "refarg", + /* 226 */ "refact", + /* 227 */ "init_deferred_pred_opt", + /* 228 */ "conslist", + /* 229 */ "tconscomma", + /* 230 */ "tcons", + /* 231 */ "sortlist", + /* 232 */ "eidlist", + /* 233 */ "defer_subclause_opt", + /* 234 */ "orconf", + /* 235 */ "resolvetype", + /* 236 */ "raisetype", + /* 237 */ "ifexists", + /* 238 */ "fullname", + /* 239 */ "selectnowith", + /* 240 */ "oneselect", + /* 241 */ "wqlist", + /* 242 */ 
"multiselect_op", + /* 243 */ "distinct", + /* 244 */ "selcollist", + /* 245 */ "from", + /* 246 */ "where_opt", + /* 247 */ "groupby_opt", + /* 248 */ "having_opt", + /* 249 */ "orderby_opt", + /* 250 */ "limit_opt", + /* 251 */ "window_clause", + /* 252 */ "values", + /* 253 */ "nexprlist", + /* 254 */ "sclp", + /* 255 */ "as", + /* 256 */ "seltablist", + /* 257 */ "stl_prefix", + /* 258 */ "joinop", + /* 259 */ "indexed_opt", + /* 260 */ "on_opt", + /* 261 */ "using_opt", + /* 262 */ "exprlist", + /* 263 */ "xfullname", + /* 264 */ "idlist", + /* 265 */ "nulls", + /* 266 */ "with", + /* 267 */ "where_opt_ret", + /* 268 */ "setlist", + /* 269 */ "insert_cmd", + /* 270 */ "idlist_opt", + /* 271 */ "upsert", + /* 272 */ "returning", + /* 273 */ "filter_over", + /* 274 */ "likeop", + /* 275 */ "between_op", + /* 276 */ "in_op", + /* 277 */ "paren_exprlist", + /* 278 */ "case_operand", + /* 279 */ "case_exprlist", + /* 280 */ "case_else", + /* 281 */ "uniqueflag", + /* 282 */ "collate", + /* 283 */ "vinto", + /* 284 */ "nmnum", + /* 285 */ "trigger_decl", + /* 286 */ "trigger_cmd_list", + /* 287 */ "trigger_time", + /* 288 */ "trigger_event", + /* 289 */ "foreach_clause", + /* 290 */ "when_clause", + /* 291 */ "trigger_cmd", + /* 292 */ "trnm", + /* 293 */ "tridxby", + /* 294 */ "database_kw_opt", + /* 295 */ "key_opt", + /* 296 */ "add_column_fullname", + /* 297 */ "kwcolumn_opt", + /* 298 */ "create_vtab", + /* 299 */ "vtabarglist", + /* 300 */ "vtabarg", + /* 301 */ "vtabargtoken", + /* 302 */ "lp", + /* 303 */ "anylist", + /* 304 */ "wqitem", + /* 305 */ "wqas", + /* 306 */ "windowdefn_list", + /* 307 */ "windowdefn", + /* 308 */ "window", + /* 309 */ "frame_opt", + /* 310 */ "part_opt", + /* 311 */ "filter_clause", + /* 312 */ "over_clause", + /* 313 */ "range_or_rows", + /* 314 */ "frame_bound", + /* 315 */ "frame_bound_s", + /* 316 */ "frame_bound_e", + /* 317 */ "frame_exclude_opt", + /* 318 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -159814,385 +163234,389 @@ static const char *const yyRuleName[] = { /* 16 */ "ifnotexists ::= IF NOT EXISTS", /* 17 */ "temp ::= TEMP", /* 18 */ "temp ::=", - /* 19 */ "create_table_args ::= LP columnlist conslist_opt RP table_options", + /* 19 */ "create_table_args ::= LP columnlist conslist_opt RP table_option_set", /* 20 */ "create_table_args ::= AS select", - /* 21 */ "table_options ::=", - /* 22 */ "table_options ::= WITHOUT nm", - /* 23 */ "columnname ::= nm typetoken", - /* 24 */ "typetoken ::=", - /* 25 */ "typetoken ::= typename LP signed RP", - /* 26 */ "typetoken ::= typename LP signed COMMA signed RP", - /* 27 */ "typename ::= typename ID|STRING", - /* 28 */ "scanpt ::=", - /* 29 */ "scantok ::=", - /* 30 */ "ccons ::= CONSTRAINT nm", - /* 31 */ "ccons ::= DEFAULT scantok term", - /* 32 */ "ccons ::= DEFAULT LP expr RP", - /* 33 */ "ccons ::= DEFAULT PLUS scantok term", - /* 34 */ "ccons ::= DEFAULT MINUS scantok term", - /* 35 */ "ccons ::= DEFAULT scantok ID|INDEXED", - /* 36 */ "ccons ::= NOT NULL onconf", - /* 37 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc", - /* 38 */ "ccons ::= UNIQUE onconf", - /* 39 */ "ccons ::= CHECK LP expr RP", - /* 40 */ "ccons ::= REFERENCES nm eidlist_opt refargs", - /* 41 */ "ccons ::= defer_subclause", - /* 42 */ "ccons ::= COLLATE ID|STRING", - /* 43 */ "generated ::= LP expr RP", - /* 44 */ "generated ::= LP expr RP ID", - /* 45 */ "autoinc ::=", - /* 46 */ "autoinc ::= AUTOINCR", - /* 47 */ "refargs ::=", - /* 48 */ "refargs ::= refargs refarg", - /* 49 */ 
"refarg ::= MATCH nm", - /* 50 */ "refarg ::= ON INSERT refact", - /* 51 */ "refarg ::= ON DELETE refact", - /* 52 */ "refarg ::= ON UPDATE refact", - /* 53 */ "refact ::= SET NULL", - /* 54 */ "refact ::= SET DEFAULT", - /* 55 */ "refact ::= CASCADE", - /* 56 */ "refact ::= RESTRICT", - /* 57 */ "refact ::= NO ACTION", - /* 58 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt", - /* 59 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt", - /* 60 */ "init_deferred_pred_opt ::=", - /* 61 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED", - /* 62 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE", - /* 63 */ "conslist_opt ::=", - /* 64 */ "tconscomma ::= COMMA", - /* 65 */ "tcons ::= CONSTRAINT nm", - /* 66 */ "tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf", - /* 67 */ "tcons ::= UNIQUE LP sortlist RP onconf", - /* 68 */ "tcons ::= CHECK LP expr RP onconf", - /* 69 */ "tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt", - /* 70 */ "defer_subclause_opt ::=", - /* 71 */ "onconf ::=", - /* 72 */ "onconf ::= ON CONFLICT resolvetype", - /* 73 */ "orconf ::=", - /* 74 */ "orconf ::= OR resolvetype", - /* 75 */ "resolvetype ::= IGNORE", - /* 76 */ "resolvetype ::= REPLACE", - /* 77 */ "cmd ::= DROP TABLE ifexists fullname", - /* 78 */ "ifexists ::= IF EXISTS", - /* 79 */ "ifexists ::=", - /* 80 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select", - /* 81 */ "cmd ::= DROP VIEW ifexists fullname", - /* 82 */ "cmd ::= select", - /* 83 */ "select ::= WITH wqlist selectnowith", - /* 84 */ "select ::= WITH RECURSIVE wqlist selectnowith", - /* 85 */ "select ::= selectnowith", - /* 86 */ "selectnowith ::= selectnowith multiselect_op oneselect", - /* 87 */ "multiselect_op ::= UNION", - /* 88 */ "multiselect_op ::= UNION ALL", - /* 89 */ "multiselect_op ::= EXCEPT|INTERSECT", - /* 90 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", - /* 91 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt", - /* 92 */ "values ::= VALUES LP nexprlist RP", - /* 93 */ "values ::= values COMMA LP nexprlist RP", - /* 94 */ "distinct ::= DISTINCT", - /* 95 */ "distinct ::= ALL", - /* 96 */ "distinct ::=", - /* 97 */ "sclp ::=", - /* 98 */ "selcollist ::= sclp scanpt expr scanpt as", - /* 99 */ "selcollist ::= sclp scanpt STAR", - /* 100 */ "selcollist ::= sclp scanpt nm DOT STAR", - /* 101 */ "as ::= AS nm", - /* 102 */ "as ::=", - /* 103 */ "from ::=", - /* 104 */ "from ::= FROM seltablist", - /* 105 */ "stl_prefix ::= seltablist joinop", - /* 106 */ "stl_prefix ::=", - /* 107 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt", - /* 108 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt", - /* 109 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt", - /* 110 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt", - /* 111 */ "dbnm ::=", - /* 112 */ "dbnm ::= DOT nm", - /* 113 */ "fullname ::= nm", - /* 114 */ "fullname ::= nm DOT nm", - /* 115 */ "xfullname ::= nm", - /* 116 */ "xfullname ::= nm DOT nm", - /* 117 */ "xfullname ::= nm DOT nm AS nm", - /* 118 */ "xfullname ::= nm AS nm", - /* 119 */ "joinop ::= COMMA|JOIN", - /* 120 */ "joinop ::= JOIN_KW JOIN", - /* 121 */ "joinop ::= JOIN_KW nm JOIN", - /* 122 */ "joinop ::= JOIN_KW nm nm JOIN", - /* 123 */ "on_opt ::= ON expr", - /* 124 */ "on_opt ::=", - /* 125 */ "indexed_opt ::=", - /* 126 */ 
"indexed_opt ::= INDEXED BY nm", - /* 127 */ "indexed_opt ::= NOT INDEXED", - /* 128 */ "using_opt ::= USING LP idlist RP", - /* 129 */ "using_opt ::=", - /* 130 */ "orderby_opt ::=", - /* 131 */ "orderby_opt ::= ORDER BY sortlist", - /* 132 */ "sortlist ::= sortlist COMMA expr sortorder nulls", - /* 133 */ "sortlist ::= expr sortorder nulls", - /* 134 */ "sortorder ::= ASC", - /* 135 */ "sortorder ::= DESC", - /* 136 */ "sortorder ::=", - /* 137 */ "nulls ::= NULLS FIRST", - /* 138 */ "nulls ::= NULLS LAST", - /* 139 */ "nulls ::=", - /* 140 */ "groupby_opt ::=", - /* 141 */ "groupby_opt ::= GROUP BY nexprlist", - /* 142 */ "having_opt ::=", - /* 143 */ "having_opt ::= HAVING expr", - /* 144 */ "limit_opt ::=", - /* 145 */ "limit_opt ::= LIMIT expr", - /* 146 */ "limit_opt ::= LIMIT expr OFFSET expr", - /* 147 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret", - /* 149 */ "where_opt ::=", - /* 150 */ "where_opt ::= WHERE expr", - /* 151 */ "where_opt_ret ::=", - /* 152 */ "where_opt_ret ::= WHERE expr", - /* 153 */ "where_opt_ret ::= RETURNING selcollist", - /* 154 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", - /* 155 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret", - /* 156 */ "setlist ::= setlist COMMA nm EQ expr", - /* 157 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 158 */ "setlist ::= nm EQ expr", - /* 159 */ "setlist ::= LP idlist RP EQ expr", - /* 160 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 161 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", - /* 162 */ "upsert ::=", - /* 163 */ "upsert ::= RETURNING selcollist", - /* 164 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", - /* 165 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", - /* 166 */ "upsert ::= ON CONFLICT DO NOTHING returning", - /* 167 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", - /* 168 */ "returning ::= RETURNING selcollist", - /* 169 */ "insert_cmd ::= INSERT orconf", - /* 170 */ "insert_cmd ::= REPLACE", - /* 171 */ "idlist_opt ::=", - /* 172 */ "idlist_opt ::= LP idlist RP", - /* 173 */ "idlist ::= idlist COMMA nm", - /* 174 */ "idlist ::= nm", - /* 175 */ "expr ::= LP expr RP", - /* 176 */ "expr ::= ID|INDEXED", - /* 177 */ "expr ::= JOIN_KW", - /* 178 */ "expr ::= nm DOT nm", - /* 179 */ "expr ::= nm DOT nm DOT nm", - /* 180 */ "term ::= NULL|FLOAT|BLOB", - /* 181 */ "term ::= STRING", - /* 182 */ "term ::= INTEGER", - /* 183 */ "expr ::= VARIABLE", - /* 184 */ "expr ::= expr COLLATE ID|STRING", - /* 185 */ "expr ::= CAST LP expr AS typetoken RP", - /* 186 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 187 */ "expr ::= ID|INDEXED LP STAR RP", - /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", - /* 189 */ "expr ::= ID|INDEXED LP STAR RP filter_over", - /* 190 */ "term ::= CTIME_KW", - /* 191 */ "expr ::= LP nexprlist COMMA expr RP", - /* 192 */ "expr ::= expr AND expr", - /* 193 */ "expr ::= expr OR expr", - /* 194 */ "expr ::= expr LT|GT|GE|LE expr", - /* 195 */ "expr ::= expr EQ|NE expr", - /* 196 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 197 */ "expr ::= expr PLUS|MINUS expr", - /* 198 */ "expr ::= expr STAR|SLASH|REM expr", - /* 199 */ "expr ::= expr CONCAT expr", - /* 200 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 201 */ "expr ::= expr likeop expr", - /* 202 */ "expr ::= expr likeop expr 
ESCAPE expr", - /* 203 */ "expr ::= expr ISNULL|NOTNULL", - /* 204 */ "expr ::= expr NOT NULL", - /* 205 */ "expr ::= expr IS expr", - /* 206 */ "expr ::= expr IS NOT expr", - /* 207 */ "expr ::= NOT expr", - /* 208 */ "expr ::= BITNOT expr", - /* 209 */ "expr ::= PLUS|MINUS expr", - /* 210 */ "between_op ::= BETWEEN", - /* 211 */ "between_op ::= NOT BETWEEN", - /* 212 */ "expr ::= expr between_op expr AND expr", - /* 213 */ "in_op ::= IN", - /* 214 */ "in_op ::= NOT IN", - /* 215 */ "expr ::= expr in_op LP exprlist RP", - /* 216 */ "expr ::= LP select RP", - /* 217 */ "expr ::= expr in_op LP select RP", - /* 218 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 219 */ "expr ::= EXISTS LP select RP", - /* 220 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 221 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 222 */ "case_exprlist ::= WHEN expr THEN expr", - /* 223 */ "case_else ::= ELSE expr", - /* 224 */ "case_else ::=", - /* 225 */ "case_operand ::= expr", - /* 226 */ "case_operand ::=", - /* 227 */ "exprlist ::=", - /* 228 */ "nexprlist ::= nexprlist COMMA expr", - /* 229 */ "nexprlist ::= expr", - /* 230 */ "paren_exprlist ::=", - /* 231 */ "paren_exprlist ::= LP exprlist RP", - /* 232 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 233 */ "uniqueflag ::= UNIQUE", - /* 234 */ "uniqueflag ::=", - /* 235 */ "eidlist_opt ::=", - /* 236 */ "eidlist_opt ::= LP eidlist RP", - /* 237 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 238 */ "eidlist ::= nm collate sortorder", - /* 239 */ "collate ::=", - /* 240 */ "collate ::= COLLATE ID|STRING", - /* 241 */ "cmd ::= DROP INDEX ifexists fullname", - /* 242 */ "cmd ::= VACUUM vinto", - /* 243 */ "cmd ::= VACUUM nm vinto", - /* 244 */ "vinto ::= INTO expr", - /* 245 */ "vinto ::=", - /* 246 */ "cmd ::= PRAGMA nm dbnm", - /* 247 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 248 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 249 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 250 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 251 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 252 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 253 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 254 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 255 */ "trigger_time ::= BEFORE|AFTER", - /* 256 */ "trigger_time ::= INSTEAD OF", - /* 257 */ "trigger_time ::=", - /* 258 */ "trigger_event ::= DELETE|INSERT", - /* 259 */ "trigger_event ::= UPDATE", - /* 260 */ "trigger_event ::= UPDATE OF idlist", - /* 261 */ "when_clause ::=", - /* 262 */ "when_clause ::= WHEN expr", - /* 263 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 264 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 265 */ "trnm ::= nm DOT nm", - /* 266 */ "tridxby ::= INDEXED BY nm", - /* 267 */ "tridxby ::= NOT INDEXED", - /* 268 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 269 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 270 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 271 */ "trigger_cmd ::= scanpt select scanpt", - /* 272 */ "expr ::= RAISE LP IGNORE RP", - /* 273 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 274 */ "raisetype ::= ROLLBACK", - /* 275 */ "raisetype ::= ABORT", - /* 276 */ "raisetype ::= FAIL", - /* 277 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 278 */ "cmd ::= ATTACH 
database_kw_opt expr AS expr key_opt", - /* 279 */ "cmd ::= DETACH database_kw_opt expr", - /* 280 */ "key_opt ::=", - /* 281 */ "key_opt ::= KEY expr", - /* 282 */ "cmd ::= REINDEX", - /* 283 */ "cmd ::= REINDEX nm dbnm", - /* 284 */ "cmd ::= ANALYZE", - /* 285 */ "cmd ::= ANALYZE nm dbnm", - /* 286 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 287 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 288 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", - /* 289 */ "add_column_fullname ::= fullname", - /* 290 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 291 */ "cmd ::= create_vtab", - /* 292 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 293 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 294 */ "vtabarg ::=", - /* 295 */ "vtabargtoken ::= ANY", - /* 296 */ "vtabargtoken ::= lp anylist RP", - /* 297 */ "lp ::= LP", - /* 298 */ "with ::= WITH wqlist", - /* 299 */ "with ::= WITH RECURSIVE wqlist", - /* 300 */ "wqas ::= AS", - /* 301 */ "wqas ::= AS MATERIALIZED", - /* 302 */ "wqas ::= AS NOT MATERIALIZED", - /* 303 */ "wqitem ::= nm eidlist_opt wqas LP select RP", - /* 304 */ "wqlist ::= wqitem", - /* 305 */ "wqlist ::= wqlist COMMA wqitem", - /* 306 */ "windowdefn_list ::= windowdefn", - /* 307 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 308 */ "windowdefn ::= nm AS LP window RP", - /* 309 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 310 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 311 */ "window ::= ORDER BY sortlist frame_opt", - /* 312 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 313 */ "window ::= frame_opt", - /* 314 */ "window ::= nm frame_opt", - /* 315 */ "frame_opt ::=", - /* 316 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 317 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 318 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 319 */ "frame_bound_s ::= frame_bound", - /* 320 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 321 */ "frame_bound_e ::= frame_bound", - /* 322 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 323 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 324 */ "frame_bound ::= CURRENT ROW", - /* 325 */ "frame_exclude_opt ::=", - /* 326 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 327 */ "frame_exclude ::= NO OTHERS", - /* 328 */ "frame_exclude ::= CURRENT ROW", - /* 329 */ "frame_exclude ::= GROUP|TIES", - /* 330 */ "window_clause ::= WINDOW windowdefn_list", - /* 331 */ "filter_over ::= filter_clause over_clause", - /* 332 */ "filter_over ::= over_clause", - /* 333 */ "filter_over ::= filter_clause", - /* 334 */ "over_clause ::= OVER LP window RP", - /* 335 */ "over_clause ::= OVER nm", - /* 336 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 337 */ "input ::= cmdlist", - /* 338 */ "cmdlist ::= cmdlist ecmd", - /* 339 */ "cmdlist ::= ecmd", - /* 340 */ "ecmd ::= SEMI", - /* 341 */ "ecmd ::= cmdx SEMI", - /* 342 */ "ecmd ::= explain cmdx SEMI", - /* 343 */ "trans_opt ::=", - /* 344 */ "trans_opt ::= TRANSACTION", - /* 345 */ "trans_opt ::= TRANSACTION nm", - /* 346 */ "savepoint_opt ::= SAVEPOINT", - /* 347 */ "savepoint_opt ::=", - /* 348 */ "cmd ::= create_table create_table_args", - /* 349 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 350 */ "columnlist ::= columnname carglist", - /* 351 */ "nm ::= ID|INDEXED", - /* 352 */ "nm ::= STRING", - /* 353 */ "nm ::= JOIN_KW", - /* 354 
*/ "typetoken ::= typename", - /* 355 */ "typename ::= ID|STRING", - /* 356 */ "signed ::= plus_num", - /* 357 */ "signed ::= minus_num", - /* 358 */ "carglist ::= carglist ccons", - /* 359 */ "carglist ::=", - /* 360 */ "ccons ::= NULL onconf", - /* 361 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 362 */ "ccons ::= AS generated", - /* 363 */ "conslist_opt ::= COMMA conslist", - /* 364 */ "conslist ::= conslist tconscomma tcons", - /* 365 */ "conslist ::= tcons", - /* 366 */ "tconscomma ::=", - /* 367 */ "defer_subclause_opt ::= defer_subclause", - /* 368 */ "resolvetype ::= raisetype", - /* 369 */ "selectnowith ::= oneselect", - /* 370 */ "oneselect ::= values", - /* 371 */ "sclp ::= selcollist COMMA", - /* 372 */ "as ::= ID|STRING", - /* 373 */ "returning ::=", - /* 374 */ "expr ::= term", - /* 375 */ "likeop ::= LIKE_KW|MATCH", - /* 376 */ "exprlist ::= nexprlist", - /* 377 */ "nmnum ::= plus_num", - /* 378 */ "nmnum ::= nm", - /* 379 */ "nmnum ::= ON", - /* 380 */ "nmnum ::= DELETE", - /* 381 */ "nmnum ::= DEFAULT", - /* 382 */ "plus_num ::= INTEGER|FLOAT", - /* 383 */ "foreach_clause ::=", - /* 384 */ "foreach_clause ::= FOR EACH ROW", - /* 385 */ "trnm ::= nm", - /* 386 */ "tridxby ::=", - /* 387 */ "database_kw_opt ::= DATABASE", - /* 388 */ "database_kw_opt ::=", - /* 389 */ "kwcolumn_opt ::=", - /* 390 */ "kwcolumn_opt ::= COLUMNKW", - /* 391 */ "vtabarglist ::= vtabarg", - /* 392 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 393 */ "vtabarg ::= vtabarg vtabargtoken", - /* 394 */ "anylist ::=", - /* 395 */ "anylist ::= anylist LP anylist RP", - /* 396 */ "anylist ::= anylist ANY", - /* 397 */ "with ::=", + /* 21 */ "table_option_set ::=", + /* 22 */ "table_option_set ::= table_option_set COMMA table_option", + /* 23 */ "table_option ::= WITHOUT nm", + /* 24 */ "table_option ::= nm", + /* 25 */ "columnname ::= nm typetoken", + /* 26 */ "typetoken ::=", + /* 27 */ "typetoken ::= typename LP signed RP", + /* 28 */ "typetoken ::= typename LP signed COMMA signed RP", + /* 29 */ "typename ::= typename ID|STRING", + /* 30 */ "scanpt ::=", + /* 31 */ "scantok ::=", + /* 32 */ "ccons ::= CONSTRAINT nm", + /* 33 */ "ccons ::= DEFAULT scantok term", + /* 34 */ "ccons ::= DEFAULT LP expr RP", + /* 35 */ "ccons ::= DEFAULT PLUS scantok term", + /* 36 */ "ccons ::= DEFAULT MINUS scantok term", + /* 37 */ "ccons ::= DEFAULT scantok ID|INDEXED", + /* 38 */ "ccons ::= NOT NULL onconf", + /* 39 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc", + /* 40 */ "ccons ::= UNIQUE onconf", + /* 41 */ "ccons ::= CHECK LP expr RP", + /* 42 */ "ccons ::= REFERENCES nm eidlist_opt refargs", + /* 43 */ "ccons ::= defer_subclause", + /* 44 */ "ccons ::= COLLATE ID|STRING", + /* 45 */ "generated ::= LP expr RP", + /* 46 */ "generated ::= LP expr RP ID", + /* 47 */ "autoinc ::=", + /* 48 */ "autoinc ::= AUTOINCR", + /* 49 */ "refargs ::=", + /* 50 */ "refargs ::= refargs refarg", + /* 51 */ "refarg ::= MATCH nm", + /* 52 */ "refarg ::= ON INSERT refact", + /* 53 */ "refarg ::= ON DELETE refact", + /* 54 */ "refarg ::= ON UPDATE refact", + /* 55 */ "refact ::= SET NULL", + /* 56 */ "refact ::= SET DEFAULT", + /* 57 */ "refact ::= CASCADE", + /* 58 */ "refact ::= RESTRICT", + /* 59 */ "refact ::= NO ACTION", + /* 60 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt", + /* 61 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt", + /* 62 */ "init_deferred_pred_opt ::=", + /* 63 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED", + /* 64 */ "init_deferred_pred_opt ::= INITIALLY 
IMMEDIATE", + /* 65 */ "conslist_opt ::=", + /* 66 */ "tconscomma ::= COMMA", + /* 67 */ "tcons ::= CONSTRAINT nm", + /* 68 */ "tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf", + /* 69 */ "tcons ::= UNIQUE LP sortlist RP onconf", + /* 70 */ "tcons ::= CHECK LP expr RP onconf", + /* 71 */ "tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt", + /* 72 */ "defer_subclause_opt ::=", + /* 73 */ "onconf ::=", + /* 74 */ "onconf ::= ON CONFLICT resolvetype", + /* 75 */ "orconf ::=", + /* 76 */ "orconf ::= OR resolvetype", + /* 77 */ "resolvetype ::= IGNORE", + /* 78 */ "resolvetype ::= REPLACE", + /* 79 */ "cmd ::= DROP TABLE ifexists fullname", + /* 80 */ "ifexists ::= IF EXISTS", + /* 81 */ "ifexists ::=", + /* 82 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select", + /* 83 */ "cmd ::= DROP VIEW ifexists fullname", + /* 84 */ "cmd ::= select", + /* 85 */ "select ::= WITH wqlist selectnowith", + /* 86 */ "select ::= WITH RECURSIVE wqlist selectnowith", + /* 87 */ "select ::= selectnowith", + /* 88 */ "selectnowith ::= selectnowith multiselect_op oneselect", + /* 89 */ "multiselect_op ::= UNION", + /* 90 */ "multiselect_op ::= UNION ALL", + /* 91 */ "multiselect_op ::= EXCEPT|INTERSECT", + /* 92 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", + /* 93 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt", + /* 94 */ "values ::= VALUES LP nexprlist RP", + /* 95 */ "values ::= values COMMA LP nexprlist RP", + /* 96 */ "distinct ::= DISTINCT", + /* 97 */ "distinct ::= ALL", + /* 98 */ "distinct ::=", + /* 99 */ "sclp ::=", + /* 100 */ "selcollist ::= sclp scanpt expr scanpt as", + /* 101 */ "selcollist ::= sclp scanpt STAR", + /* 102 */ "selcollist ::= sclp scanpt nm DOT STAR", + /* 103 */ "as ::= AS nm", + /* 104 */ "as ::=", + /* 105 */ "from ::=", + /* 106 */ "from ::= FROM seltablist", + /* 107 */ "stl_prefix ::= seltablist joinop", + /* 108 */ "stl_prefix ::=", + /* 109 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt", + /* 110 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt", + /* 111 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt", + /* 112 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt", + /* 113 */ "dbnm ::=", + /* 114 */ "dbnm ::= DOT nm", + /* 115 */ "fullname ::= nm", + /* 116 */ "fullname ::= nm DOT nm", + /* 117 */ "xfullname ::= nm", + /* 118 */ "xfullname ::= nm DOT nm", + /* 119 */ "xfullname ::= nm DOT nm AS nm", + /* 120 */ "xfullname ::= nm AS nm", + /* 121 */ "joinop ::= COMMA|JOIN", + /* 122 */ "joinop ::= JOIN_KW JOIN", + /* 123 */ "joinop ::= JOIN_KW nm JOIN", + /* 124 */ "joinop ::= JOIN_KW nm nm JOIN", + /* 125 */ "on_opt ::= ON expr", + /* 126 */ "on_opt ::=", + /* 127 */ "indexed_opt ::=", + /* 128 */ "indexed_opt ::= INDEXED BY nm", + /* 129 */ "indexed_opt ::= NOT INDEXED", + /* 130 */ "using_opt ::= USING LP idlist RP", + /* 131 */ "using_opt ::=", + /* 132 */ "orderby_opt ::=", + /* 133 */ "orderby_opt ::= ORDER BY sortlist", + /* 134 */ "sortlist ::= sortlist COMMA expr sortorder nulls", + /* 135 */ "sortlist ::= expr sortorder nulls", + /* 136 */ "sortorder ::= ASC", + /* 137 */ "sortorder ::= DESC", + /* 138 */ "sortorder ::=", + /* 139 */ "nulls ::= NULLS FIRST", + /* 140 */ "nulls ::= NULLS LAST", + /* 141 */ "nulls ::=", + /* 142 */ "groupby_opt ::=", + /* 143 */ "groupby_opt ::= GROUP BY 
nexprlist", + /* 144 */ "having_opt ::=", + /* 145 */ "having_opt ::= HAVING expr", + /* 146 */ "limit_opt ::=", + /* 147 */ "limit_opt ::= LIMIT expr", + /* 148 */ "limit_opt ::= LIMIT expr OFFSET expr", + /* 149 */ "limit_opt ::= LIMIT expr COMMA expr", + /* 150 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret", + /* 151 */ "where_opt ::=", + /* 152 */ "where_opt ::= WHERE expr", + /* 153 */ "where_opt_ret ::=", + /* 154 */ "where_opt_ret ::= WHERE expr", + /* 155 */ "where_opt_ret ::= RETURNING selcollist", + /* 156 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 157 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret", + /* 158 */ "setlist ::= setlist COMMA nm EQ expr", + /* 159 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 160 */ "setlist ::= nm EQ expr", + /* 161 */ "setlist ::= LP idlist RP EQ expr", + /* 162 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 163 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 164 */ "upsert ::=", + /* 165 */ "upsert ::= RETURNING selcollist", + /* 166 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 167 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", + /* 168 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 169 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 170 */ "returning ::= RETURNING selcollist", + /* 171 */ "insert_cmd ::= INSERT orconf", + /* 172 */ "insert_cmd ::= REPLACE", + /* 173 */ "idlist_opt ::=", + /* 174 */ "idlist_opt ::= LP idlist RP", + /* 175 */ "idlist ::= idlist COMMA nm", + /* 176 */ "idlist ::= nm", + /* 177 */ "expr ::= LP expr RP", + /* 178 */ "expr ::= ID|INDEXED", + /* 179 */ "expr ::= JOIN_KW", + /* 180 */ "expr ::= nm DOT nm", + /* 181 */ "expr ::= nm DOT nm DOT nm", + /* 182 */ "term ::= NULL|FLOAT|BLOB", + /* 183 */ "term ::= STRING", + /* 184 */ "term ::= INTEGER", + /* 185 */ "expr ::= VARIABLE", + /* 186 */ "expr ::= expr COLLATE ID|STRING", + /* 187 */ "expr ::= CAST LP expr AS typetoken RP", + /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP", + /* 189 */ "expr ::= ID|INDEXED LP STAR RP", + /* 190 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", + /* 191 */ "expr ::= ID|INDEXED LP STAR RP filter_over", + /* 192 */ "term ::= CTIME_KW", + /* 193 */ "expr ::= LP nexprlist COMMA expr RP", + /* 194 */ "expr ::= expr AND expr", + /* 195 */ "expr ::= expr OR expr", + /* 196 */ "expr ::= expr LT|GT|GE|LE expr", + /* 197 */ "expr ::= expr EQ|NE expr", + /* 198 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 199 */ "expr ::= expr PLUS|MINUS expr", + /* 200 */ "expr ::= expr STAR|SLASH|REM expr", + /* 201 */ "expr ::= expr CONCAT expr", + /* 202 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 203 */ "expr ::= expr likeop expr", + /* 204 */ "expr ::= expr likeop expr ESCAPE expr", + /* 205 */ "expr ::= expr ISNULL|NOTNULL", + /* 206 */ "expr ::= expr NOT NULL", + /* 207 */ "expr ::= expr IS expr", + /* 208 */ "expr ::= expr IS NOT expr", + /* 209 */ "expr ::= NOT expr", + /* 210 */ "expr ::= BITNOT expr", + /* 211 */ "expr ::= PLUS|MINUS expr", + /* 212 */ "expr ::= expr PTR expr", + /* 213 */ "between_op ::= BETWEEN", + /* 214 */ "between_op ::= NOT BETWEEN", + /* 215 */ "expr ::= expr between_op expr AND expr", + /* 216 */ "in_op ::= IN", + /* 217 */ "in_op ::= NOT IN", + /* 218 */ "expr ::= expr in_op LP exprlist RP", + /* 219 */ "expr ::= LP select RP", + /* 220 
*/ "expr ::= expr in_op LP select RP", + /* 221 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 222 */ "expr ::= EXISTS LP select RP", + /* 223 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 224 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 225 */ "case_exprlist ::= WHEN expr THEN expr", + /* 226 */ "case_else ::= ELSE expr", + /* 227 */ "case_else ::=", + /* 228 */ "case_operand ::= expr", + /* 229 */ "case_operand ::=", + /* 230 */ "exprlist ::=", + /* 231 */ "nexprlist ::= nexprlist COMMA expr", + /* 232 */ "nexprlist ::= expr", + /* 233 */ "paren_exprlist ::=", + /* 234 */ "paren_exprlist ::= LP exprlist RP", + /* 235 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 236 */ "uniqueflag ::= UNIQUE", + /* 237 */ "uniqueflag ::=", + /* 238 */ "eidlist_opt ::=", + /* 239 */ "eidlist_opt ::= LP eidlist RP", + /* 240 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 241 */ "eidlist ::= nm collate sortorder", + /* 242 */ "collate ::=", + /* 243 */ "collate ::= COLLATE ID|STRING", + /* 244 */ "cmd ::= DROP INDEX ifexists fullname", + /* 245 */ "cmd ::= VACUUM vinto", + /* 246 */ "cmd ::= VACUUM nm vinto", + /* 247 */ "vinto ::= INTO expr", + /* 248 */ "vinto ::=", + /* 249 */ "cmd ::= PRAGMA nm dbnm", + /* 250 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 251 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 252 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 253 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 254 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 255 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 256 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 257 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 258 */ "trigger_time ::= BEFORE|AFTER", + /* 259 */ "trigger_time ::= INSTEAD OF", + /* 260 */ "trigger_time ::=", + /* 261 */ "trigger_event ::= DELETE|INSERT", + /* 262 */ "trigger_event ::= UPDATE", + /* 263 */ "trigger_event ::= UPDATE OF idlist", + /* 264 */ "when_clause ::=", + /* 265 */ "when_clause ::= WHEN expr", + /* 266 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 267 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 268 */ "trnm ::= nm DOT nm", + /* 269 */ "tridxby ::= INDEXED BY nm", + /* 270 */ "tridxby ::= NOT INDEXED", + /* 271 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 272 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 273 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 274 */ "trigger_cmd ::= scanpt select scanpt", + /* 275 */ "expr ::= RAISE LP IGNORE RP", + /* 276 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 277 */ "raisetype ::= ROLLBACK", + /* 278 */ "raisetype ::= ABORT", + /* 279 */ "raisetype ::= FAIL", + /* 280 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 281 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 282 */ "cmd ::= DETACH database_kw_opt expr", + /* 283 */ "key_opt ::=", + /* 284 */ "key_opt ::= KEY expr", + /* 285 */ "cmd ::= REINDEX", + /* 286 */ "cmd ::= REINDEX nm dbnm", + /* 287 */ "cmd ::= ANALYZE", + /* 288 */ "cmd ::= ANALYZE nm dbnm", + /* 289 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 290 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 291 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 292 */ "add_column_fullname ::= fullname", + /* 293 */ "cmd ::= 
ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 294 */ "cmd ::= create_vtab", + /* 295 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 296 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 297 */ "vtabarg ::=", + /* 298 */ "vtabargtoken ::= ANY", + /* 299 */ "vtabargtoken ::= lp anylist RP", + /* 300 */ "lp ::= LP", + /* 301 */ "with ::= WITH wqlist", + /* 302 */ "with ::= WITH RECURSIVE wqlist", + /* 303 */ "wqas ::= AS", + /* 304 */ "wqas ::= AS MATERIALIZED", + /* 305 */ "wqas ::= AS NOT MATERIALIZED", + /* 306 */ "wqitem ::= nm eidlist_opt wqas LP select RP", + /* 307 */ "wqlist ::= wqitem", + /* 308 */ "wqlist ::= wqlist COMMA wqitem", + /* 309 */ "windowdefn_list ::= windowdefn", + /* 310 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 311 */ "windowdefn ::= nm AS LP window RP", + /* 312 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 313 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 314 */ "window ::= ORDER BY sortlist frame_opt", + /* 315 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 316 */ "window ::= frame_opt", + /* 317 */ "window ::= nm frame_opt", + /* 318 */ "frame_opt ::=", + /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 322 */ "frame_bound_s ::= frame_bound", + /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 324 */ "frame_bound_e ::= frame_bound", + /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 327 */ "frame_bound ::= CURRENT ROW", + /* 328 */ "frame_exclude_opt ::=", + /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 330 */ "frame_exclude ::= NO OTHERS", + /* 331 */ "frame_exclude ::= CURRENT ROW", + /* 332 */ "frame_exclude ::= GROUP|TIES", + /* 333 */ "window_clause ::= WINDOW windowdefn_list", + /* 334 */ "filter_over ::= filter_clause over_clause", + /* 335 */ "filter_over ::= over_clause", + /* 336 */ "filter_over ::= filter_clause", + /* 337 */ "over_clause ::= OVER LP window RP", + /* 338 */ "over_clause ::= OVER nm", + /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 340 */ "input ::= cmdlist", + /* 341 */ "cmdlist ::= cmdlist ecmd", + /* 342 */ "cmdlist ::= ecmd", + /* 343 */ "ecmd ::= SEMI", + /* 344 */ "ecmd ::= cmdx SEMI", + /* 345 */ "ecmd ::= explain cmdx SEMI", + /* 346 */ "trans_opt ::=", + /* 347 */ "trans_opt ::= TRANSACTION", + /* 348 */ "trans_opt ::= TRANSACTION nm", + /* 349 */ "savepoint_opt ::= SAVEPOINT", + /* 350 */ "savepoint_opt ::=", + /* 351 */ "cmd ::= create_table create_table_args", + /* 352 */ "table_option_set ::= table_option", + /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 354 */ "columnlist ::= columnname carglist", + /* 355 */ "nm ::= ID|INDEXED", + /* 356 */ "nm ::= STRING", + /* 357 */ "nm ::= JOIN_KW", + /* 358 */ "typetoken ::= typename", + /* 359 */ "typename ::= ID|STRING", + /* 360 */ "signed ::= plus_num", + /* 361 */ "signed ::= minus_num", + /* 362 */ "carglist ::= carglist ccons", + /* 363 */ "carglist ::=", + /* 364 */ "ccons ::= NULL onconf", + /* 365 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 366 */ "ccons ::= AS generated", + /* 367 */ "conslist_opt ::= COMMA conslist", + /* 368 */ "conslist ::= conslist tconscomma tcons", + /* 369 */ "conslist ::= tcons", + /* 370 */ "tconscomma ::=", + /* 371 */ 
"defer_subclause_opt ::= defer_subclause", + /* 372 */ "resolvetype ::= raisetype", + /* 373 */ "selectnowith ::= oneselect", + /* 374 */ "oneselect ::= values", + /* 375 */ "sclp ::= selcollist COMMA", + /* 376 */ "as ::= ID|STRING", + /* 377 */ "returning ::=", + /* 378 */ "expr ::= term", + /* 379 */ "likeop ::= LIKE_KW|MATCH", + /* 380 */ "exprlist ::= nexprlist", + /* 381 */ "nmnum ::= plus_num", + /* 382 */ "nmnum ::= nm", + /* 383 */ "nmnum ::= ON", + /* 384 */ "nmnum ::= DELETE", + /* 385 */ "nmnum ::= DEFAULT", + /* 386 */ "plus_num ::= INTEGER|FLOAT", + /* 387 */ "foreach_clause ::=", + /* 388 */ "foreach_clause ::= FOR EACH ROW", + /* 389 */ "trnm ::= nm", + /* 390 */ "tridxby ::=", + /* 391 */ "database_kw_opt ::= DATABASE", + /* 392 */ "database_kw_opt ::=", + /* 393 */ "kwcolumn_opt ::=", + /* 394 */ "kwcolumn_opt ::= COLUMNKW", + /* 395 */ "vtabarglist ::= vtabarg", + /* 396 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 397 */ "vtabarg ::= vtabarg vtabargtoken", + /* 398 */ "anylist ::=", + /* 399 */ "anylist ::= anylist LP anylist RP", + /* 400 */ "anylist ::= anylist ANY", + /* 401 */ "with ::=", }; #endif /* NDEBUG */ @@ -160318,99 +163742,99 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 203: /* select */ - case 237: /* selectnowith */ - case 238: /* oneselect */ - case 250: /* values */ + case 204: /* select */ + case 239: /* selectnowith */ + case 240: /* oneselect */ + case 252: /* values */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy81)); -} - break; - case 214: /* term */ - case 215: /* expr */ - case 244: /* where_opt */ - case 246: /* having_opt */ - case 258: /* on_opt */ - case 265: /* where_opt_ret */ - case 276: /* case_operand */ - case 278: /* case_else */ - case 281: /* vinto */ - case 288: /* when_clause */ - case 293: /* key_opt */ - case 309: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy47)); +} + break; + case 216: /* term */ + case 217: /* expr */ + case 246: /* where_opt */ + case 248: /* having_opt */ + case 260: /* on_opt */ + case 267: /* where_opt_ret */ + case 278: /* case_operand */ + case 280: /* case_else */ + case 283: /* vinto */ + case 290: /* when_clause */ + case 295: /* key_opt */ + case 311: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy404)); -} - break; - case 219: /* eidlist_opt */ - case 229: /* sortlist */ - case 230: /* eidlist */ - case 242: /* selcollist */ - case 245: /* groupby_opt */ - case 247: /* orderby_opt */ - case 251: /* nexprlist */ - case 252: /* sclp */ - case 260: /* exprlist */ - case 266: /* setlist */ - case 275: /* paren_exprlist */ - case 277: /* case_exprlist */ - case 308: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy528)); +} + break; + case 221: /* eidlist_opt */ + case 231: /* sortlist */ + case 232: /* eidlist */ + case 244: /* selcollist */ + case 247: /* groupby_opt */ + case 249: /* orderby_opt */ + case 253: /* nexprlist */ + case 254: /* sclp */ + case 262: /* exprlist */ + case 268: /* setlist */ + case 277: /* paren_exprlist */ + case 279: /* case_exprlist */ + case 310: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy70)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy322)); } break; - case 236: /* fullname */ - case 243: /* from */ - case 254: /* seltablist */ - case 255: /* stl_prefix */ - case 261: /* xfullname */ + case 238: /* fullname */ + case 245: /* from */ + case 256: /* seltablist */ + case 257: /* 
stl_prefix */ + case 263: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy153)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy131)); } break; - case 239: /* wqlist */ + case 241: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy103)); +sqlite3WithDelete(pParse->db, (yypminor->yy521)); } break; - case 249: /* window_clause */ - case 304: /* windowdefn_list */ + case 251: /* window_clause */ + case 306: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy49)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy41)); } break; - case 259: /* using_opt */ - case 262: /* idlist */ - case 268: /* idlist_opt */ + case 261: /* using_opt */ + case 264: /* idlist */ + case 270: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy436)); +sqlite3IdListDelete(pParse->db, (yypminor->yy254)); } break; - case 271: /* filter_over */ - case 305: /* windowdefn */ - case 306: /* window */ - case 307: /* frame_opt */ - case 310: /* over_clause */ + case 273: /* filter_over */ + case 307: /* windowdefn */ + case 308: /* window */ + case 309: /* frame_opt */ + case 312: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy49)); +sqlite3WindowDelete(pParse->db, (yypminor->yy41)); } break; - case 284: /* trigger_cmd_list */ - case 289: /* trigger_cmd */ + case 286: /* trigger_cmd_list */ + case 291: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy157)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy33)); } break; - case 286: /* trigger_event */ + case 288: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy262).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy180).b); } break; - case 312: /* frame_bound */ - case 313: /* frame_bound_s */ - case 314: /* frame_bound_e */ + case 314: /* frame_bound */ + case 315: /* frame_bound_s */ + case 316: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy117).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy595).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -160701,404 +164125,408 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 188, /* (0) explain ::= EXPLAIN */ - 188, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 187, /* (2) cmdx ::= cmd */ - 189, /* (3) cmd ::= BEGIN transtype trans_opt */ - 190, /* (4) transtype ::= */ - 190, /* (5) transtype ::= DEFERRED */ - 190, /* (6) transtype ::= IMMEDIATE */ - 190, /* (7) transtype ::= EXCLUSIVE */ - 189, /* (8) cmd ::= COMMIT|END trans_opt */ - 189, /* (9) cmd ::= ROLLBACK trans_opt */ - 189, /* (10) cmd ::= SAVEPOINT nm */ - 189, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 189, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 194, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 196, /* (14) createkw ::= CREATE */ - 198, /* (15) ifnotexists ::= */ - 198, /* (16) ifnotexists ::= IF NOT EXISTS */ - 197, /* (17) temp ::= TEMP */ - 197, /* (18) temp ::= */ - 195, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ - 195, /* (20) create_table_args ::= AS select */ - 202, /* (21) table_options ::= */ - 202, /* (22) table_options ::= WITHOUT nm */ - 204, /* (23) columnname ::= nm typetoken */ - 206, /* (24) typetoken ::= */ - 206, /* (25) typetoken ::= typename LP signed RP */ - 206, /* (26) typetoken ::= typename LP signed COMMA signed RP */ - 207, /* (27) typename ::= typename 
ID|STRING */ - 211, /* (28) scanpt ::= */ - 212, /* (29) scantok ::= */ - 213, /* (30) ccons ::= CONSTRAINT nm */ - 213, /* (31) ccons ::= DEFAULT scantok term */ - 213, /* (32) ccons ::= DEFAULT LP expr RP */ - 213, /* (33) ccons ::= DEFAULT PLUS scantok term */ - 213, /* (34) ccons ::= DEFAULT MINUS scantok term */ - 213, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ - 213, /* (36) ccons ::= NOT NULL onconf */ - 213, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 213, /* (38) ccons ::= UNIQUE onconf */ - 213, /* (39) ccons ::= CHECK LP expr RP */ - 213, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ - 213, /* (41) ccons ::= defer_subclause */ - 213, /* (42) ccons ::= COLLATE ID|STRING */ - 222, /* (43) generated ::= LP expr RP */ - 222, /* (44) generated ::= LP expr RP ID */ - 218, /* (45) autoinc ::= */ - 218, /* (46) autoinc ::= AUTOINCR */ - 220, /* (47) refargs ::= */ - 220, /* (48) refargs ::= refargs refarg */ - 223, /* (49) refarg ::= MATCH nm */ - 223, /* (50) refarg ::= ON INSERT refact */ - 223, /* (51) refarg ::= ON DELETE refact */ - 223, /* (52) refarg ::= ON UPDATE refact */ - 224, /* (53) refact ::= SET NULL */ - 224, /* (54) refact ::= SET DEFAULT */ - 224, /* (55) refact ::= CASCADE */ - 224, /* (56) refact ::= RESTRICT */ - 224, /* (57) refact ::= NO ACTION */ - 221, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 221, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 225, /* (60) init_deferred_pred_opt ::= */ - 225, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 225, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 201, /* (63) conslist_opt ::= */ - 227, /* (64) tconscomma ::= COMMA */ - 228, /* (65) tcons ::= CONSTRAINT nm */ - 228, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 228, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ - 228, /* (68) tcons ::= CHECK LP expr RP onconf */ - 228, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 231, /* (70) defer_subclause_opt ::= */ - 216, /* (71) onconf ::= */ - 216, /* (72) onconf ::= ON CONFLICT resolvetype */ - 232, /* (73) orconf ::= */ - 232, /* (74) orconf ::= OR resolvetype */ - 233, /* (75) resolvetype ::= IGNORE */ - 233, /* (76) resolvetype ::= REPLACE */ - 189, /* (77) cmd ::= DROP TABLE ifexists fullname */ - 235, /* (78) ifexists ::= IF EXISTS */ - 235, /* (79) ifexists ::= */ - 189, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 189, /* (81) cmd ::= DROP VIEW ifexists fullname */ - 189, /* (82) cmd ::= select */ - 203, /* (83) select ::= WITH wqlist selectnowith */ - 203, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ - 203, /* (85) select ::= selectnowith */ - 237, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ - 240, /* (87) multiselect_op ::= UNION */ - 240, /* (88) multiselect_op ::= UNION ALL */ - 240, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ - 238, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 238, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 250, /* (92) values ::= VALUES LP nexprlist RP */ - 250, /* (93) values ::= values COMMA LP nexprlist RP */ - 241, /* (94) distinct ::= DISTINCT */ - 241, /* (95) distinct ::= ALL */ - 241, /* (96) distinct ::= */ - 252, /* (97) sclp ::= */ - 242, /* (98) selcollist ::= sclp scanpt expr 
scanpt as */ - 242, /* (99) selcollist ::= sclp scanpt STAR */ - 242, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ - 253, /* (101) as ::= AS nm */ - 253, /* (102) as ::= */ - 243, /* (103) from ::= */ - 243, /* (104) from ::= FROM seltablist */ - 255, /* (105) stl_prefix ::= seltablist joinop */ - 255, /* (106) stl_prefix ::= */ - 254, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ - 254, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ - 254, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ - 254, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ - 199, /* (111) dbnm ::= */ - 199, /* (112) dbnm ::= DOT nm */ - 236, /* (113) fullname ::= nm */ - 236, /* (114) fullname ::= nm DOT nm */ - 261, /* (115) xfullname ::= nm */ - 261, /* (116) xfullname ::= nm DOT nm */ - 261, /* (117) xfullname ::= nm DOT nm AS nm */ - 261, /* (118) xfullname ::= nm AS nm */ - 256, /* (119) joinop ::= COMMA|JOIN */ - 256, /* (120) joinop ::= JOIN_KW JOIN */ - 256, /* (121) joinop ::= JOIN_KW nm JOIN */ - 256, /* (122) joinop ::= JOIN_KW nm nm JOIN */ - 258, /* (123) on_opt ::= ON expr */ - 258, /* (124) on_opt ::= */ - 257, /* (125) indexed_opt ::= */ - 257, /* (126) indexed_opt ::= INDEXED BY nm */ - 257, /* (127) indexed_opt ::= NOT INDEXED */ - 259, /* (128) using_opt ::= USING LP idlist RP */ - 259, /* (129) using_opt ::= */ - 247, /* (130) orderby_opt ::= */ - 247, /* (131) orderby_opt ::= ORDER BY sortlist */ - 229, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ - 229, /* (133) sortlist ::= expr sortorder nulls */ - 217, /* (134) sortorder ::= ASC */ - 217, /* (135) sortorder ::= DESC */ - 217, /* (136) sortorder ::= */ - 263, /* (137) nulls ::= NULLS FIRST */ - 263, /* (138) nulls ::= NULLS LAST */ - 263, /* (139) nulls ::= */ - 245, /* (140) groupby_opt ::= */ - 245, /* (141) groupby_opt ::= GROUP BY nexprlist */ - 246, /* (142) having_opt ::= */ - 246, /* (143) having_opt ::= HAVING expr */ - 248, /* (144) limit_opt ::= */ - 248, /* (145) limit_opt ::= LIMIT expr */ - 248, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ - 248, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - 189, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ - 244, /* (149) where_opt ::= */ - 244, /* (150) where_opt ::= WHERE expr */ - 265, /* (151) where_opt_ret ::= */ - 265, /* (152) where_opt_ret ::= WHERE expr */ - 265, /* (153) where_opt_ret ::= RETURNING selcollist */ - 265, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 189, /* (155) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - 266, /* (156) setlist ::= setlist COMMA nm EQ expr */ - 266, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 266, /* (158) setlist ::= nm EQ expr */ - 266, /* (159) setlist ::= LP idlist RP EQ expr */ - 189, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 189, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 269, /* (162) upsert ::= */ - 269, /* (163) upsert ::= RETURNING selcollist */ - 269, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 269, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 269, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ - 269, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 270, /* (168) returning ::= RETURNING 
selcollist */ - 267, /* (169) insert_cmd ::= INSERT orconf */ - 267, /* (170) insert_cmd ::= REPLACE */ - 268, /* (171) idlist_opt ::= */ - 268, /* (172) idlist_opt ::= LP idlist RP */ - 262, /* (173) idlist ::= idlist COMMA nm */ - 262, /* (174) idlist ::= nm */ - 215, /* (175) expr ::= LP expr RP */ - 215, /* (176) expr ::= ID|INDEXED */ - 215, /* (177) expr ::= JOIN_KW */ - 215, /* (178) expr ::= nm DOT nm */ - 215, /* (179) expr ::= nm DOT nm DOT nm */ - 214, /* (180) term ::= NULL|FLOAT|BLOB */ - 214, /* (181) term ::= STRING */ - 214, /* (182) term ::= INTEGER */ - 215, /* (183) expr ::= VARIABLE */ - 215, /* (184) expr ::= expr COLLATE ID|STRING */ - 215, /* (185) expr ::= CAST LP expr AS typetoken RP */ - 215, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ - 215, /* (187) expr ::= ID|INDEXED LP STAR RP */ - 215, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 215, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ - 214, /* (190) term ::= CTIME_KW */ - 215, /* (191) expr ::= LP nexprlist COMMA expr RP */ - 215, /* (192) expr ::= expr AND expr */ - 215, /* (193) expr ::= expr OR expr */ - 215, /* (194) expr ::= expr LT|GT|GE|LE expr */ - 215, /* (195) expr ::= expr EQ|NE expr */ - 215, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 215, /* (197) expr ::= expr PLUS|MINUS expr */ - 215, /* (198) expr ::= expr STAR|SLASH|REM expr */ - 215, /* (199) expr ::= expr CONCAT expr */ - 272, /* (200) likeop ::= NOT LIKE_KW|MATCH */ - 215, /* (201) expr ::= expr likeop expr */ - 215, /* (202) expr ::= expr likeop expr ESCAPE expr */ - 215, /* (203) expr ::= expr ISNULL|NOTNULL */ - 215, /* (204) expr ::= expr NOT NULL */ - 215, /* (205) expr ::= expr IS expr */ - 215, /* (206) expr ::= expr IS NOT expr */ - 215, /* (207) expr ::= NOT expr */ - 215, /* (208) expr ::= BITNOT expr */ - 215, /* (209) expr ::= PLUS|MINUS expr */ - 273, /* (210) between_op ::= BETWEEN */ - 273, /* (211) between_op ::= NOT BETWEEN */ - 215, /* (212) expr ::= expr between_op expr AND expr */ - 274, /* (213) in_op ::= IN */ - 274, /* (214) in_op ::= NOT IN */ - 215, /* (215) expr ::= expr in_op LP exprlist RP */ - 215, /* (216) expr ::= LP select RP */ - 215, /* (217) expr ::= expr in_op LP select RP */ - 215, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ - 215, /* (219) expr ::= EXISTS LP select RP */ - 215, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ - 277, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 277, /* (222) case_exprlist ::= WHEN expr THEN expr */ - 278, /* (223) case_else ::= ELSE expr */ - 278, /* (224) case_else ::= */ - 276, /* (225) case_operand ::= expr */ - 276, /* (226) case_operand ::= */ - 260, /* (227) exprlist ::= */ - 251, /* (228) nexprlist ::= nexprlist COMMA expr */ - 251, /* (229) nexprlist ::= expr */ - 275, /* (230) paren_exprlist ::= */ - 275, /* (231) paren_exprlist ::= LP exprlist RP */ - 189, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 279, /* (233) uniqueflag ::= UNIQUE */ - 279, /* (234) uniqueflag ::= */ - 219, /* (235) eidlist_opt ::= */ - 219, /* (236) eidlist_opt ::= LP eidlist RP */ - 230, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ - 230, /* (238) eidlist ::= nm collate sortorder */ - 280, /* (239) collate ::= */ - 280, /* (240) collate ::= COLLATE ID|STRING */ - 189, /* (241) cmd ::= DROP INDEX ifexists fullname */ - 189, /* (242) cmd ::= VACUUM vinto */ - 189, /* (243) cmd ::= VACUUM nm vinto */ - 281, /* 
(244) vinto ::= INTO expr */ - 281, /* (245) vinto ::= */ - 189, /* (246) cmd ::= PRAGMA nm dbnm */ - 189, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 189, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 189, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 189, /* (250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 209, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ - 210, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ - 189, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 283, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 285, /* (255) trigger_time ::= BEFORE|AFTER */ - 285, /* (256) trigger_time ::= INSTEAD OF */ - 285, /* (257) trigger_time ::= */ - 286, /* (258) trigger_event ::= DELETE|INSERT */ - 286, /* (259) trigger_event ::= UPDATE */ - 286, /* (260) trigger_event ::= UPDATE OF idlist */ - 288, /* (261) when_clause ::= */ - 288, /* (262) when_clause ::= WHEN expr */ - 284, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 284, /* (264) trigger_cmd_list ::= trigger_cmd SEMI */ - 290, /* (265) trnm ::= nm DOT nm */ - 291, /* (266) tridxby ::= INDEXED BY nm */ - 291, /* (267) tridxby ::= NOT INDEXED */ - 289, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 289, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 289, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 289, /* (271) trigger_cmd ::= scanpt select scanpt */ - 215, /* (272) expr ::= RAISE LP IGNORE RP */ - 215, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ - 234, /* (274) raisetype ::= ROLLBACK */ - 234, /* (275) raisetype ::= ABORT */ - 234, /* (276) raisetype ::= FAIL */ - 189, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ - 189, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 189, /* (279) cmd ::= DETACH database_kw_opt expr */ - 293, /* (280) key_opt ::= */ - 293, /* (281) key_opt ::= KEY expr */ - 189, /* (282) cmd ::= REINDEX */ - 189, /* (283) cmd ::= REINDEX nm dbnm */ - 189, /* (284) cmd ::= ANALYZE */ - 189, /* (285) cmd ::= ANALYZE nm dbnm */ - 189, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 189, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 189, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 294, /* (289) add_column_fullname ::= fullname */ - 189, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 189, /* (291) cmd ::= create_vtab */ - 189, /* (292) cmd ::= create_vtab LP vtabarglist RP */ - 296, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 298, /* (294) vtabarg ::= */ - 299, /* (295) vtabargtoken ::= ANY */ - 299, /* (296) vtabargtoken ::= lp anylist RP */ - 300, /* (297) lp ::= LP */ - 264, /* (298) with ::= WITH wqlist */ - 264, /* (299) with ::= WITH RECURSIVE wqlist */ - 303, /* (300) wqas ::= AS */ - 303, /* (301) wqas ::= AS MATERIALIZED */ - 303, /* (302) wqas ::= AS NOT MATERIALIZED */ - 302, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ - 239, /* (304) wqlist ::= wqitem */ - 239, /* (305) wqlist ::= wqlist COMMA wqitem */ - 304, /* (306) windowdefn_list ::= windowdefn */ - 304, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 305, /* (308) windowdefn ::= nm AS LP window RP */ - 306, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 306, /* (310) window ::= nm PARTITION BY 
nexprlist orderby_opt frame_opt */ - 306, /* (311) window ::= ORDER BY sortlist frame_opt */ - 306, /* (312) window ::= nm ORDER BY sortlist frame_opt */ - 306, /* (313) window ::= frame_opt */ - 306, /* (314) window ::= nm frame_opt */ - 307, /* (315) frame_opt ::= */ - 307, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 307, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 311, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ - 313, /* (319) frame_bound_s ::= frame_bound */ - 313, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ - 314, /* (321) frame_bound_e ::= frame_bound */ - 314, /* (322) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 312, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ - 312, /* (324) frame_bound ::= CURRENT ROW */ - 315, /* (325) frame_exclude_opt ::= */ - 315, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 316, /* (327) frame_exclude ::= NO OTHERS */ - 316, /* (328) frame_exclude ::= CURRENT ROW */ - 316, /* (329) frame_exclude ::= GROUP|TIES */ - 249, /* (330) window_clause ::= WINDOW windowdefn_list */ - 271, /* (331) filter_over ::= filter_clause over_clause */ - 271, /* (332) filter_over ::= over_clause */ - 271, /* (333) filter_over ::= filter_clause */ - 310, /* (334) over_clause ::= OVER LP window RP */ - 310, /* (335) over_clause ::= OVER nm */ - 309, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ - 184, /* (337) input ::= cmdlist */ - 185, /* (338) cmdlist ::= cmdlist ecmd */ - 185, /* (339) cmdlist ::= ecmd */ - 186, /* (340) ecmd ::= SEMI */ - 186, /* (341) ecmd ::= cmdx SEMI */ - 186, /* (342) ecmd ::= explain cmdx SEMI */ - 191, /* (343) trans_opt ::= */ - 191, /* (344) trans_opt ::= TRANSACTION */ - 191, /* (345) trans_opt ::= TRANSACTION nm */ - 193, /* (346) savepoint_opt ::= SAVEPOINT */ - 193, /* (347) savepoint_opt ::= */ - 189, /* (348) cmd ::= create_table create_table_args */ - 200, /* (349) columnlist ::= columnlist COMMA columnname carglist */ - 200, /* (350) columnlist ::= columnname carglist */ - 192, /* (351) nm ::= ID|INDEXED */ - 192, /* (352) nm ::= STRING */ - 192, /* (353) nm ::= JOIN_KW */ - 206, /* (354) typetoken ::= typename */ - 207, /* (355) typename ::= ID|STRING */ - 208, /* (356) signed ::= plus_num */ - 208, /* (357) signed ::= minus_num */ - 205, /* (358) carglist ::= carglist ccons */ - 205, /* (359) carglist ::= */ - 213, /* (360) ccons ::= NULL onconf */ - 213, /* (361) ccons ::= GENERATED ALWAYS AS generated */ - 213, /* (362) ccons ::= AS generated */ - 201, /* (363) conslist_opt ::= COMMA conslist */ - 226, /* (364) conslist ::= conslist tconscomma tcons */ - 226, /* (365) conslist ::= tcons */ - 227, /* (366) tconscomma ::= */ - 231, /* (367) defer_subclause_opt ::= defer_subclause */ - 233, /* (368) resolvetype ::= raisetype */ - 237, /* (369) selectnowith ::= oneselect */ - 238, /* (370) oneselect ::= values */ - 252, /* (371) sclp ::= selcollist COMMA */ - 253, /* (372) as ::= ID|STRING */ - 270, /* (373) returning ::= */ - 215, /* (374) expr ::= term */ - 272, /* (375) likeop ::= LIKE_KW|MATCH */ - 260, /* (376) exprlist ::= nexprlist */ - 282, /* (377) nmnum ::= plus_num */ - 282, /* (378) nmnum ::= nm */ - 282, /* (379) nmnum ::= ON */ - 282, /* (380) nmnum ::= DELETE */ - 282, /* (381) nmnum ::= DEFAULT */ - 209, /* (382) plus_num ::= INTEGER|FLOAT */ - 287, /* (383) foreach_clause ::= */ - 287, /* (384) foreach_clause ::= FOR EACH ROW */ - 290, /* (385) trnm ::= nm */ - 291, /* (386) tridxby ::= */ - 292, /* 
(387) database_kw_opt ::= DATABASE */ - 292, /* (388) database_kw_opt ::= */ - 295, /* (389) kwcolumn_opt ::= */ - 295, /* (390) kwcolumn_opt ::= COLUMNKW */ - 297, /* (391) vtabarglist ::= vtabarg */ - 297, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ - 298, /* (393) vtabarg ::= vtabarg vtabargtoken */ - 301, /* (394) anylist ::= */ - 301, /* (395) anylist ::= anylist LP anylist RP */ - 301, /* (396) anylist ::= anylist ANY */ - 264, /* (397) with ::= */ + 189, /* (0) explain ::= EXPLAIN */ + 189, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 188, /* (2) cmdx ::= cmd */ + 190, /* (3) cmd ::= BEGIN transtype trans_opt */ + 191, /* (4) transtype ::= */ + 191, /* (5) transtype ::= DEFERRED */ + 191, /* (6) transtype ::= IMMEDIATE */ + 191, /* (7) transtype ::= EXCLUSIVE */ + 190, /* (8) cmd ::= COMMIT|END trans_opt */ + 190, /* (9) cmd ::= ROLLBACK trans_opt */ + 190, /* (10) cmd ::= SAVEPOINT nm */ + 190, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 190, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 195, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 197, /* (14) createkw ::= CREATE */ + 199, /* (15) ifnotexists ::= */ + 199, /* (16) ifnotexists ::= IF NOT EXISTS */ + 198, /* (17) temp ::= TEMP */ + 198, /* (18) temp ::= */ + 196, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 196, /* (20) create_table_args ::= AS select */ + 203, /* (21) table_option_set ::= */ + 203, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 205, /* (23) table_option ::= WITHOUT nm */ + 205, /* (24) table_option ::= nm */ + 206, /* (25) columnname ::= nm typetoken */ + 208, /* (26) typetoken ::= */ + 208, /* (27) typetoken ::= typename LP signed RP */ + 208, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 209, /* (29) typename ::= typename ID|STRING */ + 213, /* (30) scanpt ::= */ + 214, /* (31) scantok ::= */ + 215, /* (32) ccons ::= CONSTRAINT nm */ + 215, /* (33) ccons ::= DEFAULT scantok term */ + 215, /* (34) ccons ::= DEFAULT LP expr RP */ + 215, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 215, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 215, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 215, /* (38) ccons ::= NOT NULL onconf */ + 215, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 215, /* (40) ccons ::= UNIQUE onconf */ + 215, /* (41) ccons ::= CHECK LP expr RP */ + 215, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 215, /* (43) ccons ::= defer_subclause */ + 215, /* (44) ccons ::= COLLATE ID|STRING */ + 224, /* (45) generated ::= LP expr RP */ + 224, /* (46) generated ::= LP expr RP ID */ + 220, /* (47) autoinc ::= */ + 220, /* (48) autoinc ::= AUTOINCR */ + 222, /* (49) refargs ::= */ + 222, /* (50) refargs ::= refargs refarg */ + 225, /* (51) refarg ::= MATCH nm */ + 225, /* (52) refarg ::= ON INSERT refact */ + 225, /* (53) refarg ::= ON DELETE refact */ + 225, /* (54) refarg ::= ON UPDATE refact */ + 226, /* (55) refact ::= SET NULL */ + 226, /* (56) refact ::= SET DEFAULT */ + 226, /* (57) refact ::= CASCADE */ + 226, /* (58) refact ::= RESTRICT */ + 226, /* (59) refact ::= NO ACTION */ + 223, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 223, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 227, /* (62) init_deferred_pred_opt ::= */ + 227, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 227, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 202, /* (65) conslist_opt ::= */ + 229, /* (66) 
tconscomma ::= COMMA */ + 230, /* (67) tcons ::= CONSTRAINT nm */ + 230, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 230, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 230, /* (70) tcons ::= CHECK LP expr RP onconf */ + 230, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 233, /* (72) defer_subclause_opt ::= */ + 218, /* (73) onconf ::= */ + 218, /* (74) onconf ::= ON CONFLICT resolvetype */ + 234, /* (75) orconf ::= */ + 234, /* (76) orconf ::= OR resolvetype */ + 235, /* (77) resolvetype ::= IGNORE */ + 235, /* (78) resolvetype ::= REPLACE */ + 190, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 237, /* (80) ifexists ::= IF EXISTS */ + 237, /* (81) ifexists ::= */ + 190, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 190, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 190, /* (84) cmd ::= select */ + 204, /* (85) select ::= WITH wqlist selectnowith */ + 204, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 204, /* (87) select ::= selectnowith */ + 239, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 242, /* (89) multiselect_op ::= UNION */ + 242, /* (90) multiselect_op ::= UNION ALL */ + 242, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 240, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 240, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 252, /* (94) values ::= VALUES LP nexprlist RP */ + 252, /* (95) values ::= values COMMA LP nexprlist RP */ + 243, /* (96) distinct ::= DISTINCT */ + 243, /* (97) distinct ::= ALL */ + 243, /* (98) distinct ::= */ + 254, /* (99) sclp ::= */ + 244, /* (100) selcollist ::= sclp scanpt expr scanpt as */ + 244, /* (101) selcollist ::= sclp scanpt STAR */ + 244, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ + 255, /* (103) as ::= AS nm */ + 255, /* (104) as ::= */ + 245, /* (105) from ::= */ + 245, /* (106) from ::= FROM seltablist */ + 257, /* (107) stl_prefix ::= seltablist joinop */ + 257, /* (108) stl_prefix ::= */ + 256, /* (109) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ + 256, /* (110) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ + 256, /* (111) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ + 256, /* (112) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ + 200, /* (113) dbnm ::= */ + 200, /* (114) dbnm ::= DOT nm */ + 238, /* (115) fullname ::= nm */ + 238, /* (116) fullname ::= nm DOT nm */ + 263, /* (117) xfullname ::= nm */ + 263, /* (118) xfullname ::= nm DOT nm */ + 263, /* (119) xfullname ::= nm DOT nm AS nm */ + 263, /* (120) xfullname ::= nm AS nm */ + 258, /* (121) joinop ::= COMMA|JOIN */ + 258, /* (122) joinop ::= JOIN_KW JOIN */ + 258, /* (123) joinop ::= JOIN_KW nm JOIN */ + 258, /* (124) joinop ::= JOIN_KW nm nm JOIN */ + 260, /* (125) on_opt ::= ON expr */ + 260, /* (126) on_opt ::= */ + 259, /* (127) indexed_opt ::= */ + 259, /* (128) indexed_opt ::= INDEXED BY nm */ + 259, /* (129) indexed_opt ::= NOT INDEXED */ + 261, /* (130) using_opt ::= USING LP idlist RP */ + 261, /* (131) using_opt ::= */ + 249, /* (132) orderby_opt ::= */ + 249, /* (133) orderby_opt ::= ORDER BY sortlist */ + 231, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ + 231, /* (135) sortlist ::= expr sortorder nulls */ + 219, /* (136) sortorder ::= ASC */ + 219, /* 
(137) sortorder ::= DESC */ + 219, /* (138) sortorder ::= */ + 265, /* (139) nulls ::= NULLS FIRST */ + 265, /* (140) nulls ::= NULLS LAST */ + 265, /* (141) nulls ::= */ + 247, /* (142) groupby_opt ::= */ + 247, /* (143) groupby_opt ::= GROUP BY nexprlist */ + 248, /* (144) having_opt ::= */ + 248, /* (145) having_opt ::= HAVING expr */ + 250, /* (146) limit_opt ::= */ + 250, /* (147) limit_opt ::= LIMIT expr */ + 250, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ + 250, /* (149) limit_opt ::= LIMIT expr COMMA expr */ + 190, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 246, /* (151) where_opt ::= */ + 246, /* (152) where_opt ::= WHERE expr */ + 267, /* (153) where_opt_ret ::= */ + 267, /* (154) where_opt_ret ::= WHERE expr */ + 267, /* (155) where_opt_ret ::= RETURNING selcollist */ + 267, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 190, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + 268, /* (158) setlist ::= setlist COMMA nm EQ expr */ + 268, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 268, /* (160) setlist ::= nm EQ expr */ + 268, /* (161) setlist ::= LP idlist RP EQ expr */ + 190, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 190, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 271, /* (164) upsert ::= */ + 271, /* (165) upsert ::= RETURNING selcollist */ + 271, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 271, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 271, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ + 271, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 272, /* (170) returning ::= RETURNING selcollist */ + 269, /* (171) insert_cmd ::= INSERT orconf */ + 269, /* (172) insert_cmd ::= REPLACE */ + 270, /* (173) idlist_opt ::= */ + 270, /* (174) idlist_opt ::= LP idlist RP */ + 264, /* (175) idlist ::= idlist COMMA nm */ + 264, /* (176) idlist ::= nm */ + 217, /* (177) expr ::= LP expr RP */ + 217, /* (178) expr ::= ID|INDEXED */ + 217, /* (179) expr ::= JOIN_KW */ + 217, /* (180) expr ::= nm DOT nm */ + 217, /* (181) expr ::= nm DOT nm DOT nm */ + 216, /* (182) term ::= NULL|FLOAT|BLOB */ + 216, /* (183) term ::= STRING */ + 216, /* (184) term ::= INTEGER */ + 217, /* (185) expr ::= VARIABLE */ + 217, /* (186) expr ::= expr COLLATE ID|STRING */ + 217, /* (187) expr ::= CAST LP expr AS typetoken RP */ + 217, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ + 217, /* (189) expr ::= ID|INDEXED LP STAR RP */ + 217, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + 217, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ + 216, /* (192) term ::= CTIME_KW */ + 217, /* (193) expr ::= LP nexprlist COMMA expr RP */ + 217, /* (194) expr ::= expr AND expr */ + 217, /* (195) expr ::= expr OR expr */ + 217, /* (196) expr ::= expr LT|GT|GE|LE expr */ + 217, /* (197) expr ::= expr EQ|NE expr */ + 217, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 217, /* (199) expr ::= expr PLUS|MINUS expr */ + 217, /* (200) expr ::= expr STAR|SLASH|REM expr */ + 217, /* (201) expr ::= expr CONCAT expr */ + 274, /* (202) likeop ::= NOT LIKE_KW|MATCH */ + 217, /* (203) expr ::= expr likeop expr */ + 217, /* (204) expr ::= expr likeop expr ESCAPE expr */ + 217, /* (205) expr ::= expr ISNULL|NOTNULL */ + 217, /* (206) expr ::= expr NOT NULL */ + 217, /* 
(207) expr ::= expr IS expr */ + 217, /* (208) expr ::= expr IS NOT expr */ + 217, /* (209) expr ::= NOT expr */ + 217, /* (210) expr ::= BITNOT expr */ + 217, /* (211) expr ::= PLUS|MINUS expr */ + 217, /* (212) expr ::= expr PTR expr */ + 275, /* (213) between_op ::= BETWEEN */ + 275, /* (214) between_op ::= NOT BETWEEN */ + 217, /* (215) expr ::= expr between_op expr AND expr */ + 276, /* (216) in_op ::= IN */ + 276, /* (217) in_op ::= NOT IN */ + 217, /* (218) expr ::= expr in_op LP exprlist RP */ + 217, /* (219) expr ::= LP select RP */ + 217, /* (220) expr ::= expr in_op LP select RP */ + 217, /* (221) expr ::= expr in_op nm dbnm paren_exprlist */ + 217, /* (222) expr ::= EXISTS LP select RP */ + 217, /* (223) expr ::= CASE case_operand case_exprlist case_else END */ + 279, /* (224) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 279, /* (225) case_exprlist ::= WHEN expr THEN expr */ + 280, /* (226) case_else ::= ELSE expr */ + 280, /* (227) case_else ::= */ + 278, /* (228) case_operand ::= expr */ + 278, /* (229) case_operand ::= */ + 262, /* (230) exprlist ::= */ + 253, /* (231) nexprlist ::= nexprlist COMMA expr */ + 253, /* (232) nexprlist ::= expr */ + 277, /* (233) paren_exprlist ::= */ + 277, /* (234) paren_exprlist ::= LP exprlist RP */ + 190, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 281, /* (236) uniqueflag ::= UNIQUE */ + 281, /* (237) uniqueflag ::= */ + 221, /* (238) eidlist_opt ::= */ + 221, /* (239) eidlist_opt ::= LP eidlist RP */ + 232, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */ + 232, /* (241) eidlist ::= nm collate sortorder */ + 282, /* (242) collate ::= */ + 282, /* (243) collate ::= COLLATE ID|STRING */ + 190, /* (244) cmd ::= DROP INDEX ifexists fullname */ + 190, /* (245) cmd ::= VACUUM vinto */ + 190, /* (246) cmd ::= VACUUM nm vinto */ + 283, /* (247) vinto ::= INTO expr */ + 283, /* (248) vinto ::= */ + 190, /* (249) cmd ::= PRAGMA nm dbnm */ + 190, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 190, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 190, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 190, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 211, /* (254) plus_num ::= PLUS INTEGER|FLOAT */ + 212, /* (255) minus_num ::= MINUS INTEGER|FLOAT */ + 190, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 285, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 287, /* (258) trigger_time ::= BEFORE|AFTER */ + 287, /* (259) trigger_time ::= INSTEAD OF */ + 287, /* (260) trigger_time ::= */ + 288, /* (261) trigger_event ::= DELETE|INSERT */ + 288, /* (262) trigger_event ::= UPDATE */ + 288, /* (263) trigger_event ::= UPDATE OF idlist */ + 290, /* (264) when_clause ::= */ + 290, /* (265) when_clause ::= WHEN expr */ + 286, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 286, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */ + 292, /* (268) trnm ::= nm DOT nm */ + 293, /* (269) tridxby ::= INDEXED BY nm */ + 293, /* (270) tridxby ::= NOT INDEXED */ + 291, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 291, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 291, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 291, /* (274) trigger_cmd ::= scanpt select scanpt */ + 217, /* (275) expr ::= RAISE LP IGNORE RP */ + 217, /* (276) expr ::= RAISE LP 
raisetype COMMA nm RP */ + 236, /* (277) raisetype ::= ROLLBACK */ + 236, /* (278) raisetype ::= ABORT */ + 236, /* (279) raisetype ::= FAIL */ + 190, /* (280) cmd ::= DROP TRIGGER ifexists fullname */ + 190, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 190, /* (282) cmd ::= DETACH database_kw_opt expr */ + 295, /* (283) key_opt ::= */ + 295, /* (284) key_opt ::= KEY expr */ + 190, /* (285) cmd ::= REINDEX */ + 190, /* (286) cmd ::= REINDEX nm dbnm */ + 190, /* (287) cmd ::= ANALYZE */ + 190, /* (288) cmd ::= ANALYZE nm dbnm */ + 190, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 190, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 190, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 296, /* (292) add_column_fullname ::= fullname */ + 190, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 190, /* (294) cmd ::= create_vtab */ + 190, /* (295) cmd ::= create_vtab LP vtabarglist RP */ + 298, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 300, /* (297) vtabarg ::= */ + 301, /* (298) vtabargtoken ::= ANY */ + 301, /* (299) vtabargtoken ::= lp anylist RP */ + 302, /* (300) lp ::= LP */ + 266, /* (301) with ::= WITH wqlist */ + 266, /* (302) with ::= WITH RECURSIVE wqlist */ + 305, /* (303) wqas ::= AS */ + 305, /* (304) wqas ::= AS MATERIALIZED */ + 305, /* (305) wqas ::= AS NOT MATERIALIZED */ + 304, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */ + 241, /* (307) wqlist ::= wqitem */ + 241, /* (308) wqlist ::= wqlist COMMA wqitem */ + 306, /* (309) windowdefn_list ::= windowdefn */ + 306, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 307, /* (311) windowdefn ::= nm AS LP window RP */ + 308, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (314) window ::= ORDER BY sortlist frame_opt */ + 308, /* (315) window ::= nm ORDER BY sortlist frame_opt */ + 308, /* (316) window ::= frame_opt */ + 308, /* (317) window ::= nm frame_opt */ + 309, /* (318) frame_opt ::= */ + 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + 315, /* (322) frame_bound_s ::= frame_bound */ + 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + 316, /* (324) frame_bound_e ::= frame_bound */ + 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + 314, /* (327) frame_bound ::= CURRENT ROW */ + 317, /* (328) frame_exclude_opt ::= */ + 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 318, /* (330) frame_exclude ::= NO OTHERS */ + 318, /* (331) frame_exclude ::= CURRENT ROW */ + 318, /* (332) frame_exclude ::= GROUP|TIES */ + 251, /* (333) window_clause ::= WINDOW windowdefn_list */ + 273, /* (334) filter_over ::= filter_clause over_clause */ + 273, /* (335) filter_over ::= over_clause */ + 273, /* (336) filter_over ::= filter_clause */ + 312, /* (337) over_clause ::= OVER LP window RP */ + 312, /* (338) over_clause ::= OVER nm */ + 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + 185, /* (340) input ::= cmdlist */ + 186, /* (341) cmdlist ::= cmdlist ecmd */ + 186, /* (342) cmdlist ::= ecmd */ + 187, /* (343) ecmd ::= SEMI */ + 187, /* (344) ecmd ::= cmdx SEMI */ + 187, /* (345) 
ecmd ::= explain cmdx SEMI */ + 192, /* (346) trans_opt ::= */ + 192, /* (347) trans_opt ::= TRANSACTION */ + 192, /* (348) trans_opt ::= TRANSACTION nm */ + 194, /* (349) savepoint_opt ::= SAVEPOINT */ + 194, /* (350) savepoint_opt ::= */ + 190, /* (351) cmd ::= create_table create_table_args */ + 203, /* (352) table_option_set ::= table_option */ + 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + 201, /* (354) columnlist ::= columnname carglist */ + 193, /* (355) nm ::= ID|INDEXED */ + 193, /* (356) nm ::= STRING */ + 193, /* (357) nm ::= JOIN_KW */ + 208, /* (358) typetoken ::= typename */ + 209, /* (359) typename ::= ID|STRING */ + 210, /* (360) signed ::= plus_num */ + 210, /* (361) signed ::= minus_num */ + 207, /* (362) carglist ::= carglist ccons */ + 207, /* (363) carglist ::= */ + 215, /* (364) ccons ::= NULL onconf */ + 215, /* (365) ccons ::= GENERATED ALWAYS AS generated */ + 215, /* (366) ccons ::= AS generated */ + 202, /* (367) conslist_opt ::= COMMA conslist */ + 228, /* (368) conslist ::= conslist tconscomma tcons */ + 228, /* (369) conslist ::= tcons */ + 229, /* (370) tconscomma ::= */ + 233, /* (371) defer_subclause_opt ::= defer_subclause */ + 235, /* (372) resolvetype ::= raisetype */ + 239, /* (373) selectnowith ::= oneselect */ + 240, /* (374) oneselect ::= values */ + 254, /* (375) sclp ::= selcollist COMMA */ + 255, /* (376) as ::= ID|STRING */ + 272, /* (377) returning ::= */ + 217, /* (378) expr ::= term */ + 274, /* (379) likeop ::= LIKE_KW|MATCH */ + 262, /* (380) exprlist ::= nexprlist */ + 284, /* (381) nmnum ::= plus_num */ + 284, /* (382) nmnum ::= nm */ + 284, /* (383) nmnum ::= ON */ + 284, /* (384) nmnum ::= DELETE */ + 284, /* (385) nmnum ::= DEFAULT */ + 211, /* (386) plus_num ::= INTEGER|FLOAT */ + 289, /* (387) foreach_clause ::= */ + 289, /* (388) foreach_clause ::= FOR EACH ROW */ + 292, /* (389) trnm ::= nm */ + 293, /* (390) tridxby ::= */ + 294, /* (391) database_kw_opt ::= DATABASE */ + 294, /* (392) database_kw_opt ::= */ + 297, /* (393) kwcolumn_opt ::= */ + 297, /* (394) kwcolumn_opt ::= COLUMNKW */ + 299, /* (395) vtabarglist ::= vtabarg */ + 299, /* (396) vtabarglist ::= vtabarglist COMMA vtabarg */ + 300, /* (397) vtabarg ::= vtabarg vtabargtoken */ + 303, /* (398) anylist ::= */ + 303, /* (399) anylist ::= anylist LP anylist RP */ + 303, /* (400) anylist ::= anylist ANY */ + 266, /* (401) with ::= */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -161123,385 +164551,389 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (16) ifnotexists ::= IF NOT EXISTS */ -1, /* (17) temp ::= TEMP */ 0, /* (18) temp ::= */ - -5, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ + -5, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ -2, /* (20) create_table_args ::= AS select */ - 0, /* (21) table_options ::= */ - -2, /* (22) table_options ::= WITHOUT nm */ - -2, /* (23) columnname ::= nm typetoken */ - 0, /* (24) typetoken ::= */ - -4, /* (25) typetoken ::= typename LP signed RP */ - -6, /* (26) typetoken ::= typename LP signed COMMA signed RP */ - -2, /* (27) typename ::= typename ID|STRING */ - 0, /* (28) scanpt ::= */ - 0, /* (29) scantok ::= */ - -2, /* (30) ccons ::= CONSTRAINT nm */ - -3, /* (31) ccons ::= DEFAULT scantok term */ - -4, /* (32) ccons ::= DEFAULT LP expr RP */ - -4, /* (33) ccons ::= DEFAULT PLUS scantok term */ - -4, /* (34) ccons ::= DEFAULT MINUS scantok term */ - -3, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ 
- -3, /* (36) ccons ::= NOT NULL onconf */ - -5, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - -2, /* (38) ccons ::= UNIQUE onconf */ - -4, /* (39) ccons ::= CHECK LP expr RP */ - -4, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ - -1, /* (41) ccons ::= defer_subclause */ - -2, /* (42) ccons ::= COLLATE ID|STRING */ - -3, /* (43) generated ::= LP expr RP */ - -4, /* (44) generated ::= LP expr RP ID */ - 0, /* (45) autoinc ::= */ - -1, /* (46) autoinc ::= AUTOINCR */ - 0, /* (47) refargs ::= */ - -2, /* (48) refargs ::= refargs refarg */ - -2, /* (49) refarg ::= MATCH nm */ - -3, /* (50) refarg ::= ON INSERT refact */ - -3, /* (51) refarg ::= ON DELETE refact */ - -3, /* (52) refarg ::= ON UPDATE refact */ - -2, /* (53) refact ::= SET NULL */ - -2, /* (54) refact ::= SET DEFAULT */ - -1, /* (55) refact ::= CASCADE */ - -1, /* (56) refact ::= RESTRICT */ - -2, /* (57) refact ::= NO ACTION */ - -3, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - -2, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 0, /* (60) init_deferred_pred_opt ::= */ - -2, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - -2, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 0, /* (63) conslist_opt ::= */ - -1, /* (64) tconscomma ::= COMMA */ - -2, /* (65) tcons ::= CONSTRAINT nm */ - -7, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - -5, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ - -5, /* (68) tcons ::= CHECK LP expr RP onconf */ - -10, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 0, /* (70) defer_subclause_opt ::= */ - 0, /* (71) onconf ::= */ - -3, /* (72) onconf ::= ON CONFLICT resolvetype */ - 0, /* (73) orconf ::= */ - -2, /* (74) orconf ::= OR resolvetype */ - -1, /* (75) resolvetype ::= IGNORE */ - -1, /* (76) resolvetype ::= REPLACE */ - -4, /* (77) cmd ::= DROP TABLE ifexists fullname */ - -2, /* (78) ifexists ::= IF EXISTS */ - 0, /* (79) ifexists ::= */ - -9, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - -4, /* (81) cmd ::= DROP VIEW ifexists fullname */ - -1, /* (82) cmd ::= select */ - -3, /* (83) select ::= WITH wqlist selectnowith */ - -4, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ - -1, /* (85) select ::= selectnowith */ - -3, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ - -1, /* (87) multiselect_op ::= UNION */ - -2, /* (88) multiselect_op ::= UNION ALL */ - -1, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ - -9, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - -10, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - -4, /* (92) values ::= VALUES LP nexprlist RP */ - -5, /* (93) values ::= values COMMA LP nexprlist RP */ - -1, /* (94) distinct ::= DISTINCT */ - -1, /* (95) distinct ::= ALL */ - 0, /* (96) distinct ::= */ - 0, /* (97) sclp ::= */ - -5, /* (98) selcollist ::= sclp scanpt expr scanpt as */ - -3, /* (99) selcollist ::= sclp scanpt STAR */ - -5, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ - -2, /* (101) as ::= AS nm */ - 0, /* (102) as ::= */ - 0, /* (103) from ::= */ - -2, /* (104) from ::= FROM seltablist */ - -2, /* (105) stl_prefix ::= seltablist joinop */ - 0, /* (106) stl_prefix ::= */ - -7, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ - -9, /* (108) seltablist ::= stl_prefix 
nm dbnm LP exprlist RP as on_opt using_opt */ - -7, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ - -7, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ - 0, /* (111) dbnm ::= */ - -2, /* (112) dbnm ::= DOT nm */ - -1, /* (113) fullname ::= nm */ - -3, /* (114) fullname ::= nm DOT nm */ - -1, /* (115) xfullname ::= nm */ - -3, /* (116) xfullname ::= nm DOT nm */ - -5, /* (117) xfullname ::= nm DOT nm AS nm */ - -3, /* (118) xfullname ::= nm AS nm */ - -1, /* (119) joinop ::= COMMA|JOIN */ - -2, /* (120) joinop ::= JOIN_KW JOIN */ - -3, /* (121) joinop ::= JOIN_KW nm JOIN */ - -4, /* (122) joinop ::= JOIN_KW nm nm JOIN */ - -2, /* (123) on_opt ::= ON expr */ - 0, /* (124) on_opt ::= */ - 0, /* (125) indexed_opt ::= */ - -3, /* (126) indexed_opt ::= INDEXED BY nm */ - -2, /* (127) indexed_opt ::= NOT INDEXED */ - -4, /* (128) using_opt ::= USING LP idlist RP */ - 0, /* (129) using_opt ::= */ - 0, /* (130) orderby_opt ::= */ - -3, /* (131) orderby_opt ::= ORDER BY sortlist */ - -5, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ - -3, /* (133) sortlist ::= expr sortorder nulls */ - -1, /* (134) sortorder ::= ASC */ - -1, /* (135) sortorder ::= DESC */ - 0, /* (136) sortorder ::= */ - -2, /* (137) nulls ::= NULLS FIRST */ - -2, /* (138) nulls ::= NULLS LAST */ - 0, /* (139) nulls ::= */ - 0, /* (140) groupby_opt ::= */ - -3, /* (141) groupby_opt ::= GROUP BY nexprlist */ - 0, /* (142) having_opt ::= */ - -2, /* (143) having_opt ::= HAVING expr */ - 0, /* (144) limit_opt ::= */ - -2, /* (145) limit_opt ::= LIMIT expr */ - -4, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ - -4, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - -6, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ - 0, /* (149) where_opt ::= */ - -2, /* (150) where_opt ::= WHERE expr */ - 0, /* (151) where_opt_ret ::= */ - -2, /* (152) where_opt_ret ::= WHERE expr */ - -2, /* (153) where_opt_ret ::= RETURNING selcollist */ - -4, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ - -9, /* (155) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - -5, /* (156) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (158) setlist ::= nm EQ expr */ - -5, /* (159) setlist ::= LP idlist RP EQ expr */ - -7, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -8, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 0, /* (162) upsert ::= */ - -2, /* (163) upsert ::= RETURNING selcollist */ - -12, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - -9, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - -5, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ - -8, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - -2, /* (168) returning ::= RETURNING selcollist */ - -2, /* (169) insert_cmd ::= INSERT orconf */ - -1, /* (170) insert_cmd ::= REPLACE */ - 0, /* (171) idlist_opt ::= */ - -3, /* (172) idlist_opt ::= LP idlist RP */ - -3, /* (173) idlist ::= idlist COMMA nm */ - -1, /* (174) idlist ::= nm */ - -3, /* (175) expr ::= LP expr RP */ - -1, /* (176) expr ::= ID|INDEXED */ - -1, /* (177) expr ::= JOIN_KW */ - -3, /* (178) expr ::= nm DOT nm */ - -5, /* (179) expr ::= nm DOT nm DOT nm */ - -1, /* (180) term ::= NULL|FLOAT|BLOB */ - -1, /* (181) term ::= STRING */ - -1, /* (182) 
term ::= INTEGER */ - -1, /* (183) expr ::= VARIABLE */ - -3, /* (184) expr ::= expr COLLATE ID|STRING */ - -6, /* (185) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (187) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - -5, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (190) term ::= CTIME_KW */ - -5, /* (191) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (192) expr ::= expr AND expr */ - -3, /* (193) expr ::= expr OR expr */ - -3, /* (194) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (195) expr ::= expr EQ|NE expr */ - -3, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (197) expr ::= expr PLUS|MINUS expr */ - -3, /* (198) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (199) expr ::= expr CONCAT expr */ - -2, /* (200) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (201) expr ::= expr likeop expr */ - -5, /* (202) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (203) expr ::= expr ISNULL|NOTNULL */ - -3, /* (204) expr ::= expr NOT NULL */ - -3, /* (205) expr ::= expr IS expr */ - -4, /* (206) expr ::= expr IS NOT expr */ - -2, /* (207) expr ::= NOT expr */ - -2, /* (208) expr ::= BITNOT expr */ - -2, /* (209) expr ::= PLUS|MINUS expr */ - -1, /* (210) between_op ::= BETWEEN */ - -2, /* (211) between_op ::= NOT BETWEEN */ - -5, /* (212) expr ::= expr between_op expr AND expr */ - -1, /* (213) in_op ::= IN */ - -2, /* (214) in_op ::= NOT IN */ - -5, /* (215) expr ::= expr in_op LP exprlist RP */ - -3, /* (216) expr ::= LP select RP */ - -5, /* (217) expr ::= expr in_op LP select RP */ - -5, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (219) expr ::= EXISTS LP select RP */ - -5, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (222) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (223) case_else ::= ELSE expr */ - 0, /* (224) case_else ::= */ - -1, /* (225) case_operand ::= expr */ - 0, /* (226) case_operand ::= */ - 0, /* (227) exprlist ::= */ - -3, /* (228) nexprlist ::= nexprlist COMMA expr */ - -1, /* (229) nexprlist ::= expr */ - 0, /* (230) paren_exprlist ::= */ - -3, /* (231) paren_exprlist ::= LP exprlist RP */ - -12, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (233) uniqueflag ::= UNIQUE */ - 0, /* (234) uniqueflag ::= */ - 0, /* (235) eidlist_opt ::= */ - -3, /* (236) eidlist_opt ::= LP eidlist RP */ - -5, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (238) eidlist ::= nm collate sortorder */ - 0, /* (239) collate ::= */ - -2, /* (240) collate ::= COLLATE ID|STRING */ - -4, /* (241) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (242) cmd ::= VACUUM vinto */ - -3, /* (243) cmd ::= VACUUM nm vinto */ - -2, /* (244) vinto ::= INTO expr */ - 0, /* (245) vinto ::= */ - -3, /* (246) cmd ::= PRAGMA nm dbnm */ - -5, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, 
/* (255) trigger_time ::= BEFORE|AFTER */ - -2, /* (256) trigger_time ::= INSTEAD OF */ - 0, /* (257) trigger_time ::= */ - -1, /* (258) trigger_event ::= DELETE|INSERT */ - -1, /* (259) trigger_event ::= UPDATE */ - -3, /* (260) trigger_event ::= UPDATE OF idlist */ - 0, /* (261) when_clause ::= */ - -2, /* (262) when_clause ::= WHEN expr */ - -3, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (264) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (265) trnm ::= nm DOT nm */ - -3, /* (266) tridxby ::= INDEXED BY nm */ - -2, /* (267) tridxby ::= NOT INDEXED */ - -9, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (271) trigger_cmd ::= scanpt select scanpt */ - -4, /* (272) expr ::= RAISE LP IGNORE RP */ - -6, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (274) raisetype ::= ROLLBACK */ - -1, /* (275) raisetype ::= ABORT */ - -1, /* (276) raisetype ::= FAIL */ - -4, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (279) cmd ::= DETACH database_kw_opt expr */ - 0, /* (280) key_opt ::= */ - -2, /* (281) key_opt ::= KEY expr */ - -1, /* (282) cmd ::= REINDEX */ - -3, /* (283) cmd ::= REINDEX nm dbnm */ - -1, /* (284) cmd ::= ANALYZE */ - -3, /* (285) cmd ::= ANALYZE nm dbnm */ - -6, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -6, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - -1, /* (289) add_column_fullname ::= fullname */ - -8, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (291) cmd ::= create_vtab */ - -4, /* (292) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (294) vtabarg ::= */ - -1, /* (295) vtabargtoken ::= ANY */ - -3, /* (296) vtabargtoken ::= lp anylist RP */ - -1, /* (297) lp ::= LP */ - -2, /* (298) with ::= WITH wqlist */ - -3, /* (299) with ::= WITH RECURSIVE wqlist */ - -1, /* (300) wqas ::= AS */ - -2, /* (301) wqas ::= AS MATERIALIZED */ - -3, /* (302) wqas ::= AS NOT MATERIALIZED */ - -6, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ - -1, /* (304) wqlist ::= wqitem */ - -3, /* (305) wqlist ::= wqlist COMMA wqitem */ - -1, /* (306) windowdefn_list ::= windowdefn */ - -3, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (308) windowdefn ::= nm AS LP window RP */ - -5, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (310) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (311) window ::= ORDER BY sortlist frame_opt */ - -5, /* (312) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (313) window ::= frame_opt */ - -2, /* (314) window ::= nm frame_opt */ - 0, /* (315) frame_opt ::= */ - -3, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (319) frame_bound_s ::= frame_bound */ - -2, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (321) frame_bound_e ::= frame_bound */ - -2, /* (322) frame_bound_e ::= UNBOUNDED 
FOLLOWING */ - -2, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (324) frame_bound ::= CURRENT ROW */ - 0, /* (325) frame_exclude_opt ::= */ - -2, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (327) frame_exclude ::= NO OTHERS */ - -2, /* (328) frame_exclude ::= CURRENT ROW */ - -1, /* (329) frame_exclude ::= GROUP|TIES */ - -2, /* (330) window_clause ::= WINDOW windowdefn_list */ - -2, /* (331) filter_over ::= filter_clause over_clause */ - -1, /* (332) filter_over ::= over_clause */ - -1, /* (333) filter_over ::= filter_clause */ - -4, /* (334) over_clause ::= OVER LP window RP */ - -2, /* (335) over_clause ::= OVER nm */ - -5, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (337) input ::= cmdlist */ - -2, /* (338) cmdlist ::= cmdlist ecmd */ - -1, /* (339) cmdlist ::= ecmd */ - -1, /* (340) ecmd ::= SEMI */ - -2, /* (341) ecmd ::= cmdx SEMI */ - -3, /* (342) ecmd ::= explain cmdx SEMI */ - 0, /* (343) trans_opt ::= */ - -1, /* (344) trans_opt ::= TRANSACTION */ - -2, /* (345) trans_opt ::= TRANSACTION nm */ - -1, /* (346) savepoint_opt ::= SAVEPOINT */ - 0, /* (347) savepoint_opt ::= */ - -2, /* (348) cmd ::= create_table create_table_args */ - -4, /* (349) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (350) columnlist ::= columnname carglist */ - -1, /* (351) nm ::= ID|INDEXED */ - -1, /* (352) nm ::= STRING */ - -1, /* (353) nm ::= JOIN_KW */ - -1, /* (354) typetoken ::= typename */ - -1, /* (355) typename ::= ID|STRING */ - -1, /* (356) signed ::= plus_num */ - -1, /* (357) signed ::= minus_num */ - -2, /* (358) carglist ::= carglist ccons */ - 0, /* (359) carglist ::= */ - -2, /* (360) ccons ::= NULL onconf */ - -4, /* (361) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (362) ccons ::= AS generated */ - -2, /* (363) conslist_opt ::= COMMA conslist */ - -3, /* (364) conslist ::= conslist tconscomma tcons */ - -1, /* (365) conslist ::= tcons */ - 0, /* (366) tconscomma ::= */ - -1, /* (367) defer_subclause_opt ::= defer_subclause */ - -1, /* (368) resolvetype ::= raisetype */ - -1, /* (369) selectnowith ::= oneselect */ - -1, /* (370) oneselect ::= values */ - -2, /* (371) sclp ::= selcollist COMMA */ - -1, /* (372) as ::= ID|STRING */ - 0, /* (373) returning ::= */ - -1, /* (374) expr ::= term */ - -1, /* (375) likeop ::= LIKE_KW|MATCH */ - -1, /* (376) exprlist ::= nexprlist */ - -1, /* (377) nmnum ::= plus_num */ - -1, /* (378) nmnum ::= nm */ - -1, /* (379) nmnum ::= ON */ - -1, /* (380) nmnum ::= DELETE */ - -1, /* (381) nmnum ::= DEFAULT */ - -1, /* (382) plus_num ::= INTEGER|FLOAT */ - 0, /* (383) foreach_clause ::= */ - -3, /* (384) foreach_clause ::= FOR EACH ROW */ - -1, /* (385) trnm ::= nm */ - 0, /* (386) tridxby ::= */ - -1, /* (387) database_kw_opt ::= DATABASE */ - 0, /* (388) database_kw_opt ::= */ - 0, /* (389) kwcolumn_opt ::= */ - -1, /* (390) kwcolumn_opt ::= COLUMNKW */ - -1, /* (391) vtabarglist ::= vtabarg */ - -3, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (393) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (394) anylist ::= */ - -4, /* (395) anylist ::= anylist LP anylist RP */ - -2, /* (396) anylist ::= anylist ANY */ - 0, /* (397) with ::= */ + 0, /* (21) table_option_set ::= */ + -3, /* (22) table_option_set ::= table_option_set COMMA table_option */ + -2, /* (23) table_option ::= WITHOUT nm */ + -1, /* (24) table_option ::= nm */ + -2, /* (25) columnname ::= nm typetoken */ + 0, /* (26) typetoken ::= */ + -4, /* (27) typetoken ::= typename LP signed RP */ + -6, /* 
(28) typetoken ::= typename LP signed COMMA signed RP */ + -2, /* (29) typename ::= typename ID|STRING */ + 0, /* (30) scanpt ::= */ + 0, /* (31) scantok ::= */ + -2, /* (32) ccons ::= CONSTRAINT nm */ + -3, /* (33) ccons ::= DEFAULT scantok term */ + -4, /* (34) ccons ::= DEFAULT LP expr RP */ + -4, /* (35) ccons ::= DEFAULT PLUS scantok term */ + -4, /* (36) ccons ::= DEFAULT MINUS scantok term */ + -3, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + -3, /* (38) ccons ::= NOT NULL onconf */ + -5, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + -2, /* (40) ccons ::= UNIQUE onconf */ + -4, /* (41) ccons ::= CHECK LP expr RP */ + -4, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + -1, /* (43) ccons ::= defer_subclause */ + -2, /* (44) ccons ::= COLLATE ID|STRING */ + -3, /* (45) generated ::= LP expr RP */ + -4, /* (46) generated ::= LP expr RP ID */ + 0, /* (47) autoinc ::= */ + -1, /* (48) autoinc ::= AUTOINCR */ + 0, /* (49) refargs ::= */ + -2, /* (50) refargs ::= refargs refarg */ + -2, /* (51) refarg ::= MATCH nm */ + -3, /* (52) refarg ::= ON INSERT refact */ + -3, /* (53) refarg ::= ON DELETE refact */ + -3, /* (54) refarg ::= ON UPDATE refact */ + -2, /* (55) refact ::= SET NULL */ + -2, /* (56) refact ::= SET DEFAULT */ + -1, /* (57) refact ::= CASCADE */ + -1, /* (58) refact ::= RESTRICT */ + -2, /* (59) refact ::= NO ACTION */ + -3, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + -2, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 0, /* (62) init_deferred_pred_opt ::= */ + -2, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + -2, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 0, /* (65) conslist_opt ::= */ + -1, /* (66) tconscomma ::= COMMA */ + -2, /* (67) tcons ::= CONSTRAINT nm */ + -7, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + -5, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + -5, /* (70) tcons ::= CHECK LP expr RP onconf */ + -10, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 0, /* (72) defer_subclause_opt ::= */ + 0, /* (73) onconf ::= */ + -3, /* (74) onconf ::= ON CONFLICT resolvetype */ + 0, /* (75) orconf ::= */ + -2, /* (76) orconf ::= OR resolvetype */ + -1, /* (77) resolvetype ::= IGNORE */ + -1, /* (78) resolvetype ::= REPLACE */ + -4, /* (79) cmd ::= DROP TABLE ifexists fullname */ + -2, /* (80) ifexists ::= IF EXISTS */ + 0, /* (81) ifexists ::= */ + -9, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + -4, /* (83) cmd ::= DROP VIEW ifexists fullname */ + -1, /* (84) cmd ::= select */ + -3, /* (85) select ::= WITH wqlist selectnowith */ + -4, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + -1, /* (87) select ::= selectnowith */ + -3, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + -1, /* (89) multiselect_op ::= UNION */ + -2, /* (90) multiselect_op ::= UNION ALL */ + -1, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + -9, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + -10, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + -4, /* (94) values ::= VALUES LP nexprlist RP */ + -5, /* (95) values ::= values COMMA LP nexprlist RP */ + -1, /* (96) distinct ::= DISTINCT */ + -1, /* (97) distinct ::= ALL */ + 0, /* (98) distinct ::= */ + 0, /* (99) sclp ::= */ + -5, /* (100) selcollist ::= sclp 
scanpt expr scanpt as */ + -3, /* (101) selcollist ::= sclp scanpt STAR */ + -5, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ + -2, /* (103) as ::= AS nm */ + 0, /* (104) as ::= */ + 0, /* (105) from ::= */ + -2, /* (106) from ::= FROM seltablist */ + -2, /* (107) stl_prefix ::= seltablist joinop */ + 0, /* (108) stl_prefix ::= */ + -7, /* (109) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ + -9, /* (110) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ + -7, /* (111) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ + -7, /* (112) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ + 0, /* (113) dbnm ::= */ + -2, /* (114) dbnm ::= DOT nm */ + -1, /* (115) fullname ::= nm */ + -3, /* (116) fullname ::= nm DOT nm */ + -1, /* (117) xfullname ::= nm */ + -3, /* (118) xfullname ::= nm DOT nm */ + -5, /* (119) xfullname ::= nm DOT nm AS nm */ + -3, /* (120) xfullname ::= nm AS nm */ + -1, /* (121) joinop ::= COMMA|JOIN */ + -2, /* (122) joinop ::= JOIN_KW JOIN */ + -3, /* (123) joinop ::= JOIN_KW nm JOIN */ + -4, /* (124) joinop ::= JOIN_KW nm nm JOIN */ + -2, /* (125) on_opt ::= ON expr */ + 0, /* (126) on_opt ::= */ + 0, /* (127) indexed_opt ::= */ + -3, /* (128) indexed_opt ::= INDEXED BY nm */ + -2, /* (129) indexed_opt ::= NOT INDEXED */ + -4, /* (130) using_opt ::= USING LP idlist RP */ + 0, /* (131) using_opt ::= */ + 0, /* (132) orderby_opt ::= */ + -3, /* (133) orderby_opt ::= ORDER BY sortlist */ + -5, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ + -3, /* (135) sortlist ::= expr sortorder nulls */ + -1, /* (136) sortorder ::= ASC */ + -1, /* (137) sortorder ::= DESC */ + 0, /* (138) sortorder ::= */ + -2, /* (139) nulls ::= NULLS FIRST */ + -2, /* (140) nulls ::= NULLS LAST */ + 0, /* (141) nulls ::= */ + 0, /* (142) groupby_opt ::= */ + -3, /* (143) groupby_opt ::= GROUP BY nexprlist */ + 0, /* (144) having_opt ::= */ + -2, /* (145) having_opt ::= HAVING expr */ + 0, /* (146) limit_opt ::= */ + -2, /* (147) limit_opt ::= LIMIT expr */ + -4, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ + -4, /* (149) limit_opt ::= LIMIT expr COMMA expr */ + -6, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 0, /* (151) where_opt ::= */ + -2, /* (152) where_opt ::= WHERE expr */ + 0, /* (153) where_opt_ret ::= */ + -2, /* (154) where_opt_ret ::= WHERE expr */ + -2, /* (155) where_opt_ret ::= RETURNING selcollist */ + -4, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -9, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + -5, /* (158) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (160) setlist ::= nm EQ expr */ + -5, /* (161) setlist ::= LP idlist RP EQ expr */ + -7, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (164) upsert ::= */ + -2, /* (165) upsert ::= RETURNING selcollist */ + -12, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + -9, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (170) returning ::= RETURNING selcollist */ + -2, /* (171) insert_cmd ::= INSERT orconf */ + -1, /* (172) 
insert_cmd ::= REPLACE */ + 0, /* (173) idlist_opt ::= */ + -3, /* (174) idlist_opt ::= LP idlist RP */ + -3, /* (175) idlist ::= idlist COMMA nm */ + -1, /* (176) idlist ::= nm */ + -3, /* (177) expr ::= LP expr RP */ + -1, /* (178) expr ::= ID|INDEXED */ + -1, /* (179) expr ::= JOIN_KW */ + -3, /* (180) expr ::= nm DOT nm */ + -5, /* (181) expr ::= nm DOT nm DOT nm */ + -1, /* (182) term ::= NULL|FLOAT|BLOB */ + -1, /* (183) term ::= STRING */ + -1, /* (184) term ::= INTEGER */ + -1, /* (185) expr ::= VARIABLE */ + -3, /* (186) expr ::= expr COLLATE ID|STRING */ + -6, /* (187) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ + -4, /* (189) expr ::= ID|INDEXED LP STAR RP */ + -6, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + -5, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ + -1, /* (192) term ::= CTIME_KW */ + -5, /* (193) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (194) expr ::= expr AND expr */ + -3, /* (195) expr ::= expr OR expr */ + -3, /* (196) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (197) expr ::= expr EQ|NE expr */ + -3, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (199) expr ::= expr PLUS|MINUS expr */ + -3, /* (200) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (201) expr ::= expr CONCAT expr */ + -2, /* (202) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (203) expr ::= expr likeop expr */ + -5, /* (204) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (205) expr ::= expr ISNULL|NOTNULL */ + -3, /* (206) expr ::= expr NOT NULL */ + -3, /* (207) expr ::= expr IS expr */ + -4, /* (208) expr ::= expr IS NOT expr */ + -2, /* (209) expr ::= NOT expr */ + -2, /* (210) expr ::= BITNOT expr */ + -2, /* (211) expr ::= PLUS|MINUS expr */ + -3, /* (212) expr ::= expr PTR expr */ + -1, /* (213) between_op ::= BETWEEN */ + -2, /* (214) between_op ::= NOT BETWEEN */ + -5, /* (215) expr ::= expr between_op expr AND expr */ + -1, /* (216) in_op ::= IN */ + -2, /* (217) in_op ::= NOT IN */ + -5, /* (218) expr ::= expr in_op LP exprlist RP */ + -3, /* (219) expr ::= LP select RP */ + -5, /* (220) expr ::= expr in_op LP select RP */ + -5, /* (221) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (222) expr ::= EXISTS LP select RP */ + -5, /* (223) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (224) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (225) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (226) case_else ::= ELSE expr */ + 0, /* (227) case_else ::= */ + -1, /* (228) case_operand ::= expr */ + 0, /* (229) case_operand ::= */ + 0, /* (230) exprlist ::= */ + -3, /* (231) nexprlist ::= nexprlist COMMA expr */ + -1, /* (232) nexprlist ::= expr */ + 0, /* (233) paren_exprlist ::= */ + -3, /* (234) paren_exprlist ::= LP exprlist RP */ + -12, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (236) uniqueflag ::= UNIQUE */ + 0, /* (237) uniqueflag ::= */ + 0, /* (238) eidlist_opt ::= */ + -3, /* (239) eidlist_opt ::= LP eidlist RP */ + -5, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (241) eidlist ::= nm collate sortorder */ + 0, /* (242) collate ::= */ + -2, /* (243) collate ::= COLLATE ID|STRING */ + -4, /* (244) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (245) cmd ::= VACUUM vinto */ + -3, /* (246) cmd ::= VACUUM nm vinto */ + -2, /* (247) vinto ::= INTO expr */ + 0, /* (248) vinto ::= */ + -3, /* (249) cmd ::= PRAGMA nm dbnm */ + -5, /* (250) cmd ::= 
PRAGMA nm dbnm EQ nmnum */ + -6, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (254) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (255) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (258) trigger_time ::= BEFORE|AFTER */ + -2, /* (259) trigger_time ::= INSTEAD OF */ + 0, /* (260) trigger_time ::= */ + -1, /* (261) trigger_event ::= DELETE|INSERT */ + -1, /* (262) trigger_event ::= UPDATE */ + -3, /* (263) trigger_event ::= UPDATE OF idlist */ + 0, /* (264) when_clause ::= */ + -2, /* (265) when_clause ::= WHEN expr */ + -3, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (268) trnm ::= nm DOT nm */ + -3, /* (269) tridxby ::= INDEXED BY nm */ + -2, /* (270) tridxby ::= NOT INDEXED */ + -9, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (274) trigger_cmd ::= scanpt select scanpt */ + -4, /* (275) expr ::= RAISE LP IGNORE RP */ + -6, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (277) raisetype ::= ROLLBACK */ + -1, /* (278) raisetype ::= ABORT */ + -1, /* (279) raisetype ::= FAIL */ + -4, /* (280) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (282) cmd ::= DETACH database_kw_opt expr */ + 0, /* (283) key_opt ::= */ + -2, /* (284) key_opt ::= KEY expr */ + -1, /* (285) cmd ::= REINDEX */ + -3, /* (286) cmd ::= REINDEX nm dbnm */ + -1, /* (287) cmd ::= ANALYZE */ + -3, /* (288) cmd ::= ANALYZE nm dbnm */ + -6, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (292) add_column_fullname ::= fullname */ + -8, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (294) cmd ::= create_vtab */ + -4, /* (295) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (297) vtabarg ::= */ + -1, /* (298) vtabargtoken ::= ANY */ + -3, /* (299) vtabargtoken ::= lp anylist RP */ + -1, /* (300) lp ::= LP */ + -2, /* (301) with ::= WITH wqlist */ + -3, /* (302) with ::= WITH RECURSIVE wqlist */ + -1, /* (303) wqas ::= AS */ + -2, /* (304) wqas ::= AS MATERIALIZED */ + -3, /* (305) wqas ::= AS NOT MATERIALIZED */ + -6, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */ + -1, /* (307) wqlist ::= wqitem */ + -3, /* (308) wqlist ::= wqlist COMMA wqitem */ + -1, /* (309) windowdefn_list ::= windowdefn */ + -3, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (311) windowdefn ::= nm AS LP window RP */ + -5, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (314) window ::= ORDER BY sortlist frame_opt */ + -5, /* (315) window ::= nm ORDER BY sortlist frame_opt */ + -1, /* (316) window ::= frame_opt 
*/ + -2, /* (317) window ::= nm frame_opt */ + 0, /* (318) frame_opt ::= */ + -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (322) frame_bound_s ::= frame_bound */ + -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (324) frame_bound_e ::= frame_bound */ + -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (327) frame_bound ::= CURRENT ROW */ + 0, /* (328) frame_exclude_opt ::= */ + -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (330) frame_exclude ::= NO OTHERS */ + -2, /* (331) frame_exclude ::= CURRENT ROW */ + -1, /* (332) frame_exclude ::= GROUP|TIES */ + -2, /* (333) window_clause ::= WINDOW windowdefn_list */ + -2, /* (334) filter_over ::= filter_clause over_clause */ + -1, /* (335) filter_over ::= over_clause */ + -1, /* (336) filter_over ::= filter_clause */ + -4, /* (337) over_clause ::= OVER LP window RP */ + -2, /* (338) over_clause ::= OVER nm */ + -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (340) input ::= cmdlist */ + -2, /* (341) cmdlist ::= cmdlist ecmd */ + -1, /* (342) cmdlist ::= ecmd */ + -1, /* (343) ecmd ::= SEMI */ + -2, /* (344) ecmd ::= cmdx SEMI */ + -3, /* (345) ecmd ::= explain cmdx SEMI */ + 0, /* (346) trans_opt ::= */ + -1, /* (347) trans_opt ::= TRANSACTION */ + -2, /* (348) trans_opt ::= TRANSACTION nm */ + -1, /* (349) savepoint_opt ::= SAVEPOINT */ + 0, /* (350) savepoint_opt ::= */ + -2, /* (351) cmd ::= create_table create_table_args */ + -1, /* (352) table_option_set ::= table_option */ + -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (354) columnlist ::= columnname carglist */ + -1, /* (355) nm ::= ID|INDEXED */ + -1, /* (356) nm ::= STRING */ + -1, /* (357) nm ::= JOIN_KW */ + -1, /* (358) typetoken ::= typename */ + -1, /* (359) typename ::= ID|STRING */ + -1, /* (360) signed ::= plus_num */ + -1, /* (361) signed ::= minus_num */ + -2, /* (362) carglist ::= carglist ccons */ + 0, /* (363) carglist ::= */ + -2, /* (364) ccons ::= NULL onconf */ + -4, /* (365) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (366) ccons ::= AS generated */ + -2, /* (367) conslist_opt ::= COMMA conslist */ + -3, /* (368) conslist ::= conslist tconscomma tcons */ + -1, /* (369) conslist ::= tcons */ + 0, /* (370) tconscomma ::= */ + -1, /* (371) defer_subclause_opt ::= defer_subclause */ + -1, /* (372) resolvetype ::= raisetype */ + -1, /* (373) selectnowith ::= oneselect */ + -1, /* (374) oneselect ::= values */ + -2, /* (375) sclp ::= selcollist COMMA */ + -1, /* (376) as ::= ID|STRING */ + 0, /* (377) returning ::= */ + -1, /* (378) expr ::= term */ + -1, /* (379) likeop ::= LIKE_KW|MATCH */ + -1, /* (380) exprlist ::= nexprlist */ + -1, /* (381) nmnum ::= plus_num */ + -1, /* (382) nmnum ::= nm */ + -1, /* (383) nmnum ::= ON */ + -1, /* (384) nmnum ::= DELETE */ + -1, /* (385) nmnum ::= DEFAULT */ + -1, /* (386) plus_num ::= INTEGER|FLOAT */ + 0, /* (387) foreach_clause ::= */ + -3, /* (388) foreach_clause ::= FOR EACH ROW */ + -1, /* (389) trnm ::= nm */ + 0, /* (390) tridxby ::= */ + -1, /* (391) database_kw_opt ::= DATABASE */ + 0, /* (392) database_kw_opt ::= */ + 0, /* (393) kwcolumn_opt ::= */ + -1, /* (394) kwcolumn_opt ::= COLUMNKW */ + -1, /* (395) vtabarglist ::= vtabarg */ + -3, /* (396) vtabarglist ::= 
vtabarglist COMMA vtabarg */ + -2, /* (397) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (398) anylist ::= */ + -4, /* (399) anylist ::= anylist LP anylist RP */ + -2, /* (400) anylist ::= anylist ANY */ + 0, /* (401) with ::= */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -161553,16 +164985,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy376);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy394);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy376 = TK_DEFERRED;} +{yymsp[1].minor.yy394 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 318: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==318); -{yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-X*/} + case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); +{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -161585,7 +165017,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy376,0,0,yymsp[-2].minor.yy376); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy394,0,0,yymsp[-2].minor.yy394); } break; case 14: /* createkw ::= CREATE */ @@ -161593,95 +165025,112 @@ static YYACTIONTYPE yy_reduce( break; case 15: /* ifnotexists ::= */ case 18: /* temp ::= */ yytestcase(yyruleno==18); - case 21: /* table_options ::= */ yytestcase(yyruleno==21); - case 45: /* autoinc ::= */ yytestcase(yyruleno==45); - case 60: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==60); - case 70: /* defer_subclause_opt ::= */ yytestcase(yyruleno==70); - case 79: /* ifexists ::= */ yytestcase(yyruleno==79); - case 96: /* distinct ::= */ yytestcase(yyruleno==96); - case 239: /* collate ::= */ yytestcase(yyruleno==239); -{yymsp[1].minor.yy376 = 0;} + case 47: /* autoinc ::= */ yytestcase(yyruleno==47); + case 62: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==62); + case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72); + case 81: /* ifexists ::= */ yytestcase(yyruleno==81); + case 98: /* distinct ::= */ yytestcase(yyruleno==98); + case 242: /* collate ::= */ yytestcase(yyruleno==242); +{yymsp[1].minor.yy394 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy376 = 1;} +{yymsp[-2].minor.yy394 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy376 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy394 = pParse->db->init.busy==0;} break; - case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ + case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy376,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy285,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy81); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy81); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy47); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); } break; - case 
22: /* table_options ::= WITHOUT nm */ + case 21: /* table_option_set ::= */ +{yymsp[1].minor.yy285 = 0;} + break; + case 22: /* table_option_set ::= table_option_set COMMA table_option */ +{yylhsminor.yy285 = yymsp[-2].minor.yy285|yymsp[0].minor.yy285;} + yymsp[-2].minor.yy285 = yylhsminor.yy285; + break; + case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy376 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy285 = TF_WithoutRowid | TF_NoVisibleRowid; + }else{ + yymsp[-1].minor.yy285 = 0; + sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); + } +} + break; + case 24: /* table_option ::= nm */ +{ + if( yymsp[0].minor.yy0.n==6 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ + yylhsminor.yy285 = TF_Strict; }else{ - yymsp[-1].minor.yy376 = 0; + yylhsminor.yy285 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } + yymsp[0].minor.yy285 = yylhsminor.yy285; break; - case 23: /* columnname ::= nm typetoken */ -{sqlite3AddColumn(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);} + case 25: /* columnname ::= nm typetoken */ +{sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} break; - case 24: /* typetoken ::= */ - case 63: /* conslist_opt ::= */ yytestcase(yyruleno==63); - case 102: /* as ::= */ yytestcase(yyruleno==102); + case 26: /* typetoken ::= */ + case 65: /* conslist_opt ::= */ yytestcase(yyruleno==65); + case 104: /* as ::= */ yytestcase(yyruleno==104); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;} break; - case 25: /* typetoken ::= typename LP signed RP */ + case 27: /* typetoken ::= typename LP signed RP */ { yymsp[-3].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z); } break; - case 26: /* typetoken ::= typename LP signed COMMA signed RP */ + case 28: /* typetoken ::= typename LP signed COMMA signed RP */ { yymsp[-5].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z); } break; - case 27: /* typename ::= typename ID|STRING */ + case 29: /* typename ::= typename ID|STRING */ {yymsp[-1].minor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);} break; - case 28: /* scanpt ::= */ + case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy504 = yyLookaheadToken.z; + yymsp[1].minor.yy522 = yyLookaheadToken.z; } break; - case 29: /* scantok ::= */ + case 31: /* scantok ::= */ { assert( yyLookahead!=YYNOCODE ); yymsp[1].minor.yy0 = yyLookaheadToken; } break; - case 30: /* ccons ::= CONSTRAINT nm */ - case 65: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==65); + case 32: /* ccons ::= CONSTRAINT nm */ + case 67: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==67); {pParse->constraintName = yymsp[0].minor.yy0;} break; - case 31: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy404,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} + case 33: /* ccons ::= DEFAULT scantok term */ +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; - case 32: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy404,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} + case 34: /* ccons ::= DEFAULT LP expr RP */ 
+{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; - case 33: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy404,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} + case 35: /* ccons ::= DEFAULT PLUS scantok term */ +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; - case 34: /* ccons ::= DEFAULT MINUS scantok term */ + case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy404, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy528, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; - case 35: /* ccons ::= DEFAULT scantok ID|INDEXED */ + case 37: /* ccons ::= DEFAULT scantok ID|INDEXED */ { Expr *p = tokenExpr(pParse, TK_STRING, yymsp[0].minor.yy0); if( p ){ @@ -161691,162 +165140,162 @@ static YYACTIONTYPE yy_reduce( sqlite3AddDefaultValue(pParse,p,yymsp[0].minor.yy0.z,yymsp[0].minor.yy0.z+yymsp[0].minor.yy0.n); } break; - case 36: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy376);} + case 38: /* ccons ::= NOT NULL onconf */ +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy394);} break; - case 37: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy376,yymsp[0].minor.yy376,yymsp[-2].minor.yy376);} + case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy394,yymsp[0].minor.yy394,yymsp[-2].minor.yy394);} break; - case 38: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy376,0,0,0,0, + case 40: /* ccons ::= UNIQUE onconf */ +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy394,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; - case 39: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy404,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} + case 41: /* ccons ::= CHECK LP expr RP */ +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; - case 40: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy70,yymsp[0].minor.yy376);} + case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy322,yymsp[0].minor.yy394);} break; - case 41: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy376);} + case 43: /* ccons ::= defer_subclause */ +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy394);} break; - case 42: /* ccons ::= COLLATE ID|STRING */ + case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; - case 43: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy404,0);} + case 45: /* generated ::= LP expr RP */ +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy528,0);} break; - case 44: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy404,&yymsp[0].minor.yy0);} + case 46: /* generated ::= LP expr RP ID */ +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy528,&yymsp[0].minor.yy0);} break; - case 46: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy376 = 1;} + case 48: /* autoinc ::= AUTOINCR */ +{yymsp[0].minor.yy394 = 1;} break; - case 47: /* refargs 
::= */ -{ yymsp[1].minor.yy376 = OE_None*0x0101; /* EV: R-19803-45884 */} + case 49: /* refargs ::= */ +{ yymsp[1].minor.yy394 = OE_None*0x0101; /* EV: R-19803-45884 */} break; - case 48: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy376 = (yymsp[-1].minor.yy376 & ~yymsp[0].minor.yy139.mask) | yymsp[0].minor.yy139.value; } + case 50: /* refargs ::= refargs refarg */ +{ yymsp[-1].minor.yy394 = (yymsp[-1].minor.yy394 & ~yymsp[0].minor.yy231.mask) | yymsp[0].minor.yy231.value; } break; - case 49: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy139.value = 0; yymsp[-1].minor.yy139.mask = 0x000000; } + case 51: /* refarg ::= MATCH nm */ +{ yymsp[-1].minor.yy231.value = 0; yymsp[-1].minor.yy231.mask = 0x000000; } break; - case 50: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy139.value = 0; yymsp[-2].minor.yy139.mask = 0x000000; } + case 52: /* refarg ::= ON INSERT refact */ +{ yymsp[-2].minor.yy231.value = 0; yymsp[-2].minor.yy231.mask = 0x000000; } break; - case 51: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy139.value = yymsp[0].minor.yy376; yymsp[-2].minor.yy139.mask = 0x0000ff; } + case 53: /* refarg ::= ON DELETE refact */ +{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394; yymsp[-2].minor.yy231.mask = 0x0000ff; } break; - case 52: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy139.value = yymsp[0].minor.yy376<<8; yymsp[-2].minor.yy139.mask = 0x00ff00; } + case 54: /* refarg ::= ON UPDATE refact */ +{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394<<8; yymsp[-2].minor.yy231.mask = 0x00ff00; } break; - case 53: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy376 = OE_SetNull; /* EV: R-33326-45252 */} + case 55: /* refact ::= SET NULL */ +{ yymsp[-1].minor.yy394 = OE_SetNull; /* EV: R-33326-45252 */} break; - case 54: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy376 = OE_SetDflt; /* EV: R-33326-45252 */} + case 56: /* refact ::= SET DEFAULT */ +{ yymsp[-1].minor.yy394 = OE_SetDflt; /* EV: R-33326-45252 */} break; - case 55: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy376 = OE_Cascade; /* EV: R-33326-45252 */} + case 57: /* refact ::= CASCADE */ +{ yymsp[0].minor.yy394 = OE_Cascade; /* EV: R-33326-45252 */} break; - case 56: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy376 = OE_Restrict; /* EV: R-33326-45252 */} + case 58: /* refact ::= RESTRICT */ +{ yymsp[0].minor.yy394 = OE_Restrict; /* EV: R-33326-45252 */} break; - case 57: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy376 = OE_None; /* EV: R-33326-45252 */} + case 59: /* refact ::= NO ACTION */ +{ yymsp[-1].minor.yy394 = OE_None; /* EV: R-33326-45252 */} break; - case 58: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy376 = 0;} + case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ +{yymsp[-2].minor.yy394 = 0;} break; - case 59: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - case 74: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==74); - case 169: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==169); -{yymsp[-1].minor.yy376 = yymsp[0].minor.yy376;} + case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); + case 171: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==171); +{yymsp[-1].minor.yy394 = yymsp[0].minor.yy394;} break; - case 61: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ - case 78: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==78); - case 211: /* between_op ::= NOT BETWEEN */ 
yytestcase(yyruleno==211); - case 214: /* in_op ::= NOT IN */ yytestcase(yyruleno==214); - case 240: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==240); -{yymsp[-1].minor.yy376 = 1;} + case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ + case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); + case 214: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==214); + case 217: /* in_op ::= NOT IN */ yytestcase(yyruleno==217); + case 243: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==243); +{yymsp[-1].minor.yy394 = 1;} break; - case 62: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy376 = 0;} + case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ +{yymsp[-1].minor.yy394 = 0;} break; - case 64: /* tconscomma ::= COMMA */ + case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; - case 66: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy70,yymsp[0].minor.yy376,yymsp[-2].minor.yy376,0);} + case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy322,yymsp[0].minor.yy394,yymsp[-2].minor.yy394,0);} break; - case 67: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy70,yymsp[0].minor.yy376,0,0,0,0, + case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy322,yymsp[0].minor.yy394,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; - case 68: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy404,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} + case 70: /* tcons ::= CHECK LP expr RP onconf */ +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy528,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; - case 69: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy70, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy70, yymsp[-1].minor.yy376); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy376); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy322, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[-1].minor.yy394); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy394); } break; - case 71: /* onconf ::= */ - case 73: /* orconf ::= */ yytestcase(yyruleno==73); -{yymsp[1].minor.yy376 = OE_Default;} + case 73: /* onconf ::= */ + case 75: /* orconf ::= */ yytestcase(yyruleno==75); +{yymsp[1].minor.yy394 = OE_Default;} break; - case 72: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy376 = yymsp[0].minor.yy376;} + case 74: /* onconf ::= ON CONFLICT resolvetype */ +{yymsp[-2].minor.yy394 = yymsp[0].minor.yy394;} break; - case 75: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy376 = OE_Ignore;} + case 77: /* resolvetype ::= IGNORE */ +{yymsp[0].minor.yy394 = OE_Ignore;} break; - case 76: /* resolvetype ::= REPLACE */ - case 170: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==170); -{yymsp[0].minor.yy376 = OE_Replace;} + case 78: /* resolvetype ::= REPLACE */ + case 172: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==172); +{yymsp[0].minor.yy394 = OE_Replace;} break; - case 77: /* cmd ::= DROP TABLE ifexists fullname */ + case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy153, 0, yymsp[-1].minor.yy376); + 
sqlite3DropTable(pParse, yymsp[0].minor.yy131, 0, yymsp[-1].minor.yy394); } break; - case 80: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy70, yymsp[0].minor.yy81, yymsp[-7].minor.yy376, yymsp[-5].minor.yy376); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[0].minor.yy47, yymsp[-7].minor.yy394, yymsp[-5].minor.yy394); } break; - case 81: /* cmd ::= DROP VIEW ifexists fullname */ + case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy153, 1, yymsp[-1].minor.yy376); + sqlite3DropTable(pParse, yymsp[0].minor.yy131, 1, yymsp[-1].minor.yy394); } break; - case 82: /* cmd ::= select */ + case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy81, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy81); + sqlite3Select(pParse, yymsp[0].minor.yy47, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); } break; - case 83: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy81 = attachWithToSelect(pParse,yymsp[0].minor.yy81,yymsp[-1].minor.yy103);} + case 85: /* select ::= WITH wqlist selectnowith */ +{yymsp[-2].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} break; - case 84: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy81 = attachWithToSelect(pParse,yymsp[0].minor.yy81,yymsp[-1].minor.yy103);} + case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ +{yymsp[-3].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} break; - case 85: /* select ::= selectnowith */ + case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy81; + Select *p = yymsp[0].minor.yy47; if( p ){ parserDoubleLinkSelect(pParse, p); } - yymsp[0].minor.yy81 = p; /*A-overwrites-X*/ + yymsp[0].minor.yy47 = p; /*A-overwrites-X*/ } break; - case 86: /* selectnowith ::= selectnowith multiselect_op oneselect */ + case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy81; - Select *pLhs = yymsp[-2].minor.yy81; + Select *pRhs = yymsp[0].minor.yy47; + Select *pLhs = yymsp[-2].minor.yy47; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -161856,140 +165305,140 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy376; + pRhs->op = (u8)yymsp[-1].minor.yy394; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy376!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy394!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy81 = pRhs; + yymsp[-2].minor.yy47 = pRhs; } break; - case 87: /* multiselect_op ::= UNION */ - case 89: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==89); -{yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-OP*/} + case 89: /* multiselect_op ::= UNION */ + case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); +{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-OP*/} break; - case 88: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy376 = TK_ALL;} + case 
90: /* multiselect_op ::= UNION ALL */ +{yymsp[-1].minor.yy394 = TK_ALL;} break; - case 90: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy70,yymsp[-5].minor.yy153,yymsp[-4].minor.yy404,yymsp[-3].minor.yy70,yymsp[-2].minor.yy404,yymsp[-1].minor.yy70,yymsp[-7].minor.yy376,yymsp[0].minor.yy404); + yymsp[-8].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy322,yymsp[-5].minor.yy131,yymsp[-4].minor.yy528,yymsp[-3].minor.yy322,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[-7].minor.yy394,yymsp[0].minor.yy528); } break; - case 91: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy70,yymsp[-6].minor.yy153,yymsp[-5].minor.yy404,yymsp[-4].minor.yy70,yymsp[-3].minor.yy404,yymsp[-1].minor.yy70,yymsp[-8].minor.yy376,yymsp[0].minor.yy404); - if( yymsp[-9].minor.yy81 ){ - yymsp[-9].minor.yy81->pWinDefn = yymsp[-2].minor.yy49; + yymsp[-9].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy322,yymsp[-6].minor.yy131,yymsp[-5].minor.yy528,yymsp[-4].minor.yy322,yymsp[-3].minor.yy528,yymsp[-1].minor.yy322,yymsp[-8].minor.yy394,yymsp[0].minor.yy528); + if( yymsp[-9].minor.yy47 ){ + yymsp[-9].minor.yy47->pWinDefn = yymsp[-2].minor.yy41; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy49); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy41); } } break; - case 92: /* values ::= VALUES LP nexprlist RP */ + case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy70,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values,0); } break; - case 93: /* values ::= values COMMA LP nexprlist RP */ + case 95: /* values ::= values COMMA LP nexprlist RP */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy81; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy70,0,0,0,0,0,SF_Values|SF_MultiValue,0); + Select *pRight, *pLeft = yymsp[-4].minor.yy47; + pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values|SF_MultiValue,0); if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; if( pRight ){ pRight->op = TK_ALL; pRight->pPrior = pLeft; - yymsp[-4].minor.yy81 = pRight; + yymsp[-4].minor.yy47 = pRight; }else{ - yymsp[-4].minor.yy81 = pLeft; + yymsp[-4].minor.yy47 = pLeft; } } break; - case 94: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy376 = SF_Distinct;} + case 96: /* distinct ::= DISTINCT */ +{yymsp[0].minor.yy394 = SF_Distinct;} break; - case 95: /* distinct ::= ALL */ -{yymsp[0].minor.yy376 = SF_All;} + case 97: /* distinct ::= ALL */ +{yymsp[0].minor.yy394 = SF_All;} break; - case 97: /* sclp ::= */ - case 130: /* orderby_opt ::= */ yytestcase(yyruleno==130); - case 140: /* groupby_opt ::= */ yytestcase(yyruleno==140); - case 227: /* exprlist ::= */ yytestcase(yyruleno==227); - case 230: /* paren_exprlist ::= */ yytestcase(yyruleno==230); - case 235: /* eidlist_opt ::= */ yytestcase(yyruleno==235); -{yymsp[1].minor.yy70 = 0;} + case 99: /* sclp ::= */ + case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132); + case 142: /* groupby_opt 
::= */ yytestcase(yyruleno==142); + case 230: /* exprlist ::= */ yytestcase(yyruleno==230); + case 233: /* paren_exprlist ::= */ yytestcase(yyruleno==233); + case 238: /* eidlist_opt ::= */ yytestcase(yyruleno==238); +{yymsp[1].minor.yy322 = 0;} break; - case 98: /* selcollist ::= sclp scanpt expr scanpt as */ + case 100: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy70, yymsp[-2].minor.yy404); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy70, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy70,yymsp[-3].minor.yy504,yymsp[-1].minor.yy504); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy322,yymsp[-3].minor.yy522,yymsp[-1].minor.yy522); } break; - case 99: /* selcollist ::= sclp scanpt STAR */ + case 101: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); - yymsp[-2].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy70, p); + yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p); } break; - case 100: /* selcollist ::= sclp scanpt nm DOT STAR */ + case 102: /* selcollist ::= sclp scanpt nm DOT STAR */ { Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); - Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); + Expr *pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, pDot); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot); } break; - case 101: /* as ::= AS nm */ - case 112: /* dbnm ::= DOT nm */ yytestcase(yyruleno==112); - case 251: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==251); - case 252: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==252); + case 103: /* as ::= AS nm */ + case 114: /* dbnm ::= DOT nm */ yytestcase(yyruleno==114); + case 254: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==254); + case 255: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==255); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; - case 103: /* from ::= */ - case 106: /* stl_prefix ::= */ yytestcase(yyruleno==106); -{yymsp[1].minor.yy153 = 0;} + case 105: /* from ::= */ + case 108: /* stl_prefix ::= */ yytestcase(yyruleno==108); +{yymsp[1].minor.yy131 = 0;} break; - case 104: /* from ::= FROM seltablist */ + case 106: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy153 = yymsp[0].minor.yy153; - sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy153); + yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; + sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy131); } break; - case 105: /* stl_prefix ::= seltablist joinop */ + case 107: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy153 && yymsp[-1].minor.yy153->nSrc>0) ) yymsp[-1].minor.yy153->a[yymsp[-1].minor.yy153->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy376; + if( ALWAYS(yymsp[-1].minor.yy131 && yymsp[-1].minor.yy131->nSrc>0) ) yymsp[-1].minor.yy131->a[yymsp[-1].minor.yy131->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy394; } break; - case 107: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ + case 109: /* seltablist ::= stl_prefix nm dbnm as 
indexed_opt on_opt using_opt */ { - yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); - sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy153, &yymsp[-2].minor.yy0); + yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254); + sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy131, &yymsp[-2].minor.yy0); } break; - case 108: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ + case 110: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ { - yymsp[-8].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy153,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); - sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy153, yymsp[-4].minor.yy70); + yymsp[-8].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy131,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254); + sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy131, yymsp[-4].minor.yy322); } break; - case 109: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ + case 111: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ { - yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy81,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); + yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy47,yymsp[-1].minor.yy528,yymsp[0].minor.yy254); } break; - case 110: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ + case 112: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ { - if( yymsp[-6].minor.yy153==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy404==0 && yymsp[0].minor.yy436==0 ){ - yymsp[-6].minor.yy153 = yymsp[-4].minor.yy153; - }else if( yymsp[-4].minor.yy153->nSrc==1 ){ - yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); - if( yymsp[-6].minor.yy153 ){ - SrcItem *pNew = &yymsp[-6].minor.yy153->a[yymsp[-6].minor.yy153->nSrc-1]; - SrcItem *pOld = yymsp[-4].minor.yy153->a; + if( yymsp[-6].minor.yy131==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy528==0 && yymsp[0].minor.yy254==0 ){ + yymsp[-6].minor.yy131 = yymsp[-4].minor.yy131; + }else if( yymsp[-4].minor.yy131->nSrc==1 ){ + yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254); + if( yymsp[-6].minor.yy131 ){ + SrcItem *pNew = &yymsp[-6].minor.yy131->a[yymsp[-6].minor.yy131->nSrc-1]; + SrcItem *pOld = yymsp[-4].minor.yy131->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; @@ -162002,267 +165451,263 @@ static YYACTIONTYPE yy_reduce( pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy153); + sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy131); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy153); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy153,0,0,0,0,SF_NestedFrom,0); - 
yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); + sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy131); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy131,0,0,0,0,SF_NestedFrom,0); + yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy528,yymsp[0].minor.yy254); } } break; - case 111: /* dbnm ::= */ - case 125: /* indexed_opt ::= */ yytestcase(yyruleno==125); + case 113: /* dbnm ::= */ + case 127: /* indexed_opt ::= */ yytestcase(yyruleno==127); {yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;} break; - case 113: /* fullname ::= nm */ + case 115: /* fullname ::= nm */ { - yylhsminor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy153 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy153->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy153 = yylhsminor.yy153; + yymsp[0].minor.yy131 = yylhsminor.yy131; break; - case 114: /* fullname ::= nm DOT nm */ + case 116: /* fullname ::= nm DOT nm */ { - yylhsminor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy153 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy153->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy153 = yylhsminor.yy153; + yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 115: /* xfullname ::= nm */ -{yymsp[0].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} + case 117: /* xfullname ::= nm */ +{yymsp[0].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; - case 116: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 118: /* xfullname ::= nm DOT nm */ +{yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 117: /* xfullname ::= nm DOT nm AS nm */ + case 119: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy153 ) yymsp[-4].minor.yy153->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy131 ) yymsp[-4].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 118: /* xfullname ::= nm AS nm */ + case 120: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy153 ) yymsp[-2].minor.yy153->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy131 ) 
yymsp[-2].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 119: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy376 = JT_INNER; } + case 121: /* joinop ::= COMMA|JOIN */ +{ yymsp[0].minor.yy394 = JT_INNER; } break; - case 120: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy376 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} + case 122: /* joinop ::= JOIN_KW JOIN */ +{yymsp[-1].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; - case 121: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy376 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} + case 123: /* joinop ::= JOIN_KW nm JOIN */ +{yymsp[-2].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; - case 122: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy376 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} + case 124: /* joinop ::= JOIN_KW nm nm JOIN */ +{yymsp[-3].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; - case 123: /* on_opt ::= ON expr */ - case 143: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==143); - case 150: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==150); - case 152: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==152); - case 223: /* case_else ::= ELSE expr */ yytestcase(yyruleno==223); - case 244: /* vinto ::= INTO expr */ yytestcase(yyruleno==244); -{yymsp[-1].minor.yy404 = yymsp[0].minor.yy404;} + case 125: /* on_opt ::= ON expr */ + case 145: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==145); + case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); + case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); + case 226: /* case_else ::= ELSE expr */ yytestcase(yyruleno==226); + case 247: /* vinto ::= INTO expr */ yytestcase(yyruleno==247); +{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} break; - case 124: /* on_opt ::= */ - case 142: /* having_opt ::= */ yytestcase(yyruleno==142); - case 144: /* limit_opt ::= */ yytestcase(yyruleno==144); - case 149: /* where_opt ::= */ yytestcase(yyruleno==149); - case 151: /* where_opt_ret ::= */ yytestcase(yyruleno==151); - case 224: /* case_else ::= */ yytestcase(yyruleno==224); - case 226: /* case_operand ::= */ yytestcase(yyruleno==226); - case 245: /* vinto ::= */ yytestcase(yyruleno==245); -{yymsp[1].minor.yy404 = 0;} + case 126: /* on_opt ::= */ + case 144: /* having_opt ::= */ yytestcase(yyruleno==144); + case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); + case 151: /* where_opt ::= */ yytestcase(yyruleno==151); + case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); + case 227: /* case_else ::= */ yytestcase(yyruleno==227); + case 229: /* case_operand ::= */ yytestcase(yyruleno==229); + case 248: /* vinto ::= */ yytestcase(yyruleno==248); +{yymsp[1].minor.yy528 = 0;} break; - case 126: /* indexed_opt ::= INDEXED BY nm */ + case 128: /* indexed_opt ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} break; - case 127: /* indexed_opt ::= NOT INDEXED */ + case 129: /* indexed_opt ::= NOT INDEXED */ {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; - case 128: /* using_opt ::= USING LP idlist RP */ -{yymsp[-3].minor.yy436 = yymsp[-1].minor.yy436;} + case 130: /* using_opt ::= USING LP idlist RP */ +{yymsp[-3].minor.yy254 = 
yymsp[-1].minor.yy254;} break; - case 129: /* using_opt ::= */ - case 171: /* idlist_opt ::= */ yytestcase(yyruleno==171); -{yymsp[1].minor.yy436 = 0;} + case 131: /* using_opt ::= */ + case 173: /* idlist_opt ::= */ yytestcase(yyruleno==173); +{yymsp[1].minor.yy254 = 0;} break; - case 131: /* orderby_opt ::= ORDER BY sortlist */ - case 141: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==141); -{yymsp[-2].minor.yy70 = yymsp[0].minor.yy70;} + case 133: /* orderby_opt ::= ORDER BY sortlist */ + case 143: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==143); +{yymsp[-2].minor.yy322 = yymsp[0].minor.yy322;} break; - case 132: /* sortlist ::= sortlist COMMA expr sortorder nulls */ + case 134: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70,yymsp[-2].minor.yy404); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy70,yymsp[-1].minor.yy376,yymsp[0].minor.yy376); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); } break; - case 133: /* sortlist ::= expr sortorder nulls */ + case 135: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy70 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy404); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy70,yymsp[-1].minor.yy376,yymsp[0].minor.yy376); + yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy528); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); } break; - case 134: /* sortorder ::= ASC */ -{yymsp[0].minor.yy376 = SQLITE_SO_ASC;} + case 136: /* sortorder ::= ASC */ +{yymsp[0].minor.yy394 = SQLITE_SO_ASC;} break; - case 135: /* sortorder ::= DESC */ -{yymsp[0].minor.yy376 = SQLITE_SO_DESC;} + case 137: /* sortorder ::= DESC */ +{yymsp[0].minor.yy394 = SQLITE_SO_DESC;} break; - case 136: /* sortorder ::= */ - case 139: /* nulls ::= */ yytestcase(yyruleno==139); -{yymsp[1].minor.yy376 = SQLITE_SO_UNDEFINED;} + case 138: /* sortorder ::= */ + case 141: /* nulls ::= */ yytestcase(yyruleno==141); +{yymsp[1].minor.yy394 = SQLITE_SO_UNDEFINED;} break; - case 137: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy376 = SQLITE_SO_ASC;} + case 139: /* nulls ::= NULLS FIRST */ +{yymsp[-1].minor.yy394 = SQLITE_SO_ASC;} break; - case 138: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy376 = SQLITE_SO_DESC;} + case 140: /* nulls ::= NULLS LAST */ +{yymsp[-1].minor.yy394 = SQLITE_SO_DESC;} break; - case 145: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy404,0);} + case 147: /* limit_opt ::= LIMIT expr */ +{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,0);} break; - case 146: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} + case 148: /* limit_opt ::= LIMIT expr OFFSET expr */ +{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 147: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy404,yymsp[-2].minor.yy404);} + case 149: /* limit_opt ::= LIMIT expr COMMA expr */ +{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,yymsp[-2].minor.yy528);} break; - case 148: /* cmd ::= with 
DELETE FROM xfullname indexed_opt where_opt_ret */ + case 150: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy153, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy153,yymsp[0].minor.yy404,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy131, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy131,yymsp[0].minor.yy528,0,0); } break; - case 153: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy70); yymsp[-1].minor.yy404 = 0;} + case 155: /* where_opt_ret ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-1].minor.yy528 = 0;} break; - case 154: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy70); yymsp[-3].minor.yy404 = yymsp[-2].minor.yy404;} + case 156: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-3].minor.yy528 = yymsp[-2].minor.yy528;} break; - case 155: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + case 157: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy153, &yymsp[-4].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy70,"set list"); - yymsp[-5].minor.yy153 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy153, yymsp[-1].minor.yy153); - sqlite3Update(pParse,yymsp[-5].minor.yy153,yymsp[-2].minor.yy70,yymsp[0].minor.yy404,yymsp[-6].minor.yy376,0,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-4].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy322,"set list"); + yymsp[-5].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy131, yymsp[-1].minor.yy131); + sqlite3Update(pParse,yymsp[-5].minor.yy131,yymsp[-2].minor.yy322,yymsp[0].minor.yy528,yymsp[-6].minor.yy394,0,0,0); } break; - case 156: /* setlist ::= setlist COMMA nm EQ expr */ + case 158: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy70, yymsp[0].minor.yy404); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy70, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[0].minor.yy528); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, 1); } break; - case 157: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ + case 159: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy70 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy70, yymsp[-3].minor.yy436, yymsp[0].minor.yy404); + yymsp[-6].minor.yy322 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy322, yymsp[-3].minor.yy254, yymsp[0].minor.yy528); } break; - case 158: /* setlist ::= nm EQ expr */ + case 160: /* setlist ::= nm EQ expr */ { - yylhsminor.yy70 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy404); - sqlite3ExprListSetName(pParse, yylhsminor.yy70, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy322 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy528); + sqlite3ExprListSetName(pParse, yylhsminor.yy322, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy70 = yylhsminor.yy70; + yymsp[-2].minor.yy322 = yylhsminor.yy322; break; - case 159: /* setlist ::= LP idlist RP EQ expr */ + case 161: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy70 = 
sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy436, yymsp[0].minor.yy404); + yymsp[-4].minor.yy322 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, yymsp[0].minor.yy528); } break; - case 160: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + case 162: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy153, yymsp[-1].minor.yy81, yymsp[-2].minor.yy436, yymsp[-5].minor.yy376, yymsp[0].minor.yy190); + sqlite3Insert(pParse, yymsp[-3].minor.yy131, yymsp[-1].minor.yy47, yymsp[-2].minor.yy254, yymsp[-5].minor.yy394, yymsp[0].minor.yy444); } break; - case 161: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + case 163: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy153, 0, yymsp[-3].minor.yy436, yymsp[-6].minor.yy376, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy131, 0, yymsp[-3].minor.yy254, yymsp[-6].minor.yy394, 0); } break; - case 162: /* upsert ::= */ -{ yymsp[1].minor.yy190 = 0; } + case 164: /* upsert ::= */ +{ yymsp[1].minor.yy444 = 0; } break; - case 163: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy190 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy70); } + case 165: /* upsert ::= RETURNING selcollist */ +{ yymsp[-1].minor.yy444 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy322); } break; - case 164: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy190 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy70,yymsp[-6].minor.yy404,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404,yymsp[0].minor.yy190);} + case 166: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ +{ yymsp[-11].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy322,yymsp[-6].minor.yy528,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,yymsp[0].minor.yy444);} break; - case 165: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy190 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy70,yymsp[-3].minor.yy404,0,0,yymsp[0].minor.yy190); } + case 167: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ +{ yymsp[-8].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy322,yymsp[-3].minor.yy528,0,0,yymsp[0].minor.yy444); } break; - case 166: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy190 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } + case 168: /* upsert ::= ON CONFLICT DO NOTHING returning */ +{ yymsp[-4].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; - case 167: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy190 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404,0);} + case 169: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ +{ yymsp[-7].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,0);} break; - case 168: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy70);} + case 170: /* returning ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy322);} break; - case 172: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy436 = yymsp[-1].minor.yy436;} + case 174: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} break; - case 173: /* idlist ::= idlist COMMA nm */ 
-{yymsp[-2].minor.yy436 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy436,&yymsp[0].minor.yy0);} + case 175: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);} break; - case 174: /* idlist ::= nm */ -{yymsp[0].minor.yy436 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + case 176: /* idlist ::= nm */ +{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; - case 175: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy404 = yymsp[-1].minor.yy404;} + case 177: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} break; - case 176: /* expr ::= ID|INDEXED */ - case 177: /* expr ::= JOIN_KW */ yytestcase(yyruleno==177); -{yymsp[0].minor.yy404=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 178: /* expr ::= ID|INDEXED */ + case 179: /* expr ::= JOIN_KW */ yytestcase(yyruleno==179); +{yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 178: /* expr ::= nm DOT nm */ + case 180: /* expr ::= nm DOT nm */ { - Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); - Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); - if( IN_RENAME_OBJECT ){ - sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[0].minor.yy0); - sqlite3RenameTokenMap(pParse, (void*)temp1, &yymsp[-2].minor.yy0); - } - yylhsminor.yy404 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); + Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); + yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy404 = yylhsminor.yy404; + yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 179: /* expr ::= nm DOT nm DOT nm */ + case 181: /* expr ::= nm DOT nm DOT nm */ { - Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1); - Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); - Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); + Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); + Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); + Expr *temp3 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3); if( IN_RENAME_OBJECT ){ - sqlite3RenameTokenMap(pParse, (void*)temp3, &yymsp[0].minor.yy0); - sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[-2].minor.yy0); + sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy404 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy404 = yylhsminor.yy404; + yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 180: /* term ::= NULL|FLOAT|BLOB */ - case 181: /* term ::= STRING */ yytestcase(yyruleno==181); -{yymsp[0].minor.yy404=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 182: /* term ::= NULL|FLOAT|BLOB */ + case 183: /* term ::= STRING */ yytestcase(yyruleno==183); +{yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 182: /* term ::= INTEGER */ + case 184: /* term ::= INTEGER */ { - yylhsminor.yy404 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - 
pParse->zTail); } - yymsp[0].minor.yy404 = yylhsminor.yy404; + yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 183: /* expr ::= VARIABLE */ + case 185: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy404 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy404, n); + yymsp[0].minor.yy528 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy528, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers @@ -162271,159 +165716,167 @@ static YYACTIONTYPE yy_reduce( assert( t.n>=2 ); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy404 = 0; + yymsp[0].minor.yy528 = 0; }else{ - yymsp[0].minor.yy404 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy404 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy404->iTable); + yymsp[0].minor.yy528 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy528 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy528->iTable); } } } break; - case 184: /* expr ::= expr COLLATE ID|STRING */ + case 186: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy404 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy404, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); } break; - case 185: /* expr ::= CAST LP expr AS typetoken RP */ + case 187: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy404 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy404, yymsp[-3].minor.yy404, 0); + yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); } break; - case 186: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP */ { - yylhsminor.yy404 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy70, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy376); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); } - yymsp[-4].minor.yy404 = yylhsminor.yy404; + yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 187: /* expr ::= ID|INDEXED LP STAR RP */ + case 189: /* expr ::= ID|INDEXED LP STAR RP */ { - yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy404 = yylhsminor.yy404; + yymsp[-3].minor.yy528 = yylhsminor.yy528; break; - case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 190: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ { - yylhsminor.yy404 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy70, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy376); - sqlite3WindowAttach(pParse, yylhsminor.yy404, yymsp[0].minor.yy49); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); + sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } - yymsp[-5].minor.yy404 = yylhsminor.yy404; + yymsp[-5].minor.yy528 = yylhsminor.yy528; break; - case 189: /* expr ::= ID|INDEXED LP STAR RP 
filter_over */ + case 191: /* expr ::= ID|INDEXED LP STAR RP filter_over */ { - yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy404, yymsp[0].minor.yy49); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } - yymsp[-4].minor.yy404 = yylhsminor.yy404; + yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 190: /* term ::= CTIME_KW */ + case 192: /* term ::= CTIME_KW */ { - yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy404 = yylhsminor.yy404; + yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 191: /* expr ::= LP nexprlist COMMA expr RP */ + case 193: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy70, yymsp[-1].minor.yy404); - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy404 ){ - yymsp[-4].minor.yy404->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy528 ){ + yymsp[-4].minor.yy528->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy404->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy528->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); } } break; - case 192: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy404=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} + case 194: /* expr ::= expr AND expr */ +{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 193: /* expr ::= expr OR expr */ - case 194: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==194); - case 195: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==195); - case 196: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==196); - case 197: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==199); -{yymsp[-2].minor.yy404=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} + case 195: /* expr ::= expr OR expr */ + case 196: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==196); + case 197: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==200); + case 201: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==201); +{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 200: /* likeop ::= NOT LIKE_KW|MATCH */ + case 202: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 201: /* expr ::= expr likeop expr */ + case 203: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = 
sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy404); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy404); - yymsp[-2].minor.yy404 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy404, 0); - if( yymsp[-2].minor.yy404 ) yymsp[-2].minor.yy404->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy528); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy528); + yymsp[-2].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy528, 0); + if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; } break; - case 202: /* expr ::= expr likeop expr ESCAPE expr */ + case 204: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy404); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy404); - yymsp[-4].minor.yy404 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); - if( yymsp[-4].minor.yy404 ) yymsp[-4].minor.yy404->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy528); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); + yymsp[-4].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; } break; - case 203: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy404,0);} + case 205: /* expr ::= expr ISNULL|NOTNULL */ +{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} break; - case 204: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy404 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy404,0);} + case 206: /* expr ::= expr NOT NULL */ +{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} break; - case 205: /* expr ::= expr IS expr */ + case 207: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy404 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy404,yymsp[0].minor.yy404); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy404, yymsp[-2].minor.yy404, TK_ISNULL); + yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); } break; - case 206: /* expr ::= expr IS NOT expr */ + case 208: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy404,yymsp[0].minor.yy404); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy404, yymsp[-3].minor.yy404, TK_NOTNULL); + yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); } break; - case 207: /* expr ::= NOT expr */ - case 208: /* expr ::= BITNOT expr */ yytestcase(yyruleno==208); -{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse, 
yymsp[-1].major, yymsp[0].minor.yy404, 0);/*A-overwrites-B*/} + case 209: /* expr ::= NOT expr */ + case 210: /* expr ::= BITNOT expr */ yytestcase(yyruleno==210); +{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} break; - case 209: /* expr ::= PLUS|MINUS expr */ + case 211: /* expr ::= PLUS|MINUS expr */ { - yymsp[-1].minor.yy404 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy404, 0); + yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); /*A-overwrites-B*/ } break; - case 210: /* between_op ::= BETWEEN */ - case 213: /* in_op ::= IN */ yytestcase(yyruleno==213); -{yymsp[0].minor.yy376 = 0;} + case 212: /* expr ::= expr PTR expr */ +{ + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528); + yylhsminor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); +} + yymsp[-2].minor.yy528 = yylhsminor.yy528; + break; + case 213: /* between_op ::= BETWEEN */ + case 216: /* in_op ::= IN */ yytestcase(yyruleno==216); +{yymsp[0].minor.yy394 = 0;} break; - case 212: /* expr ::= expr between_op expr AND expr */ + case 215: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy404); - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy404, 0); - if( yymsp[-4].minor.yy404 ){ - yymsp[-4].minor.yy404->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy528, 0); + if( yymsp[-4].minor.yy528 ){ + yymsp[-4].minor.yy528->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); + if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 215: /* expr ::= expr in_op LP exprlist RP */ + case 218: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy70==0 ){ + if( yymsp[-1].minor.yy322==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -162432,197 +165885,205 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy404); - yymsp[-4].minor.yy404 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy376 ? 
"1" : "0"); - }else if( yymsp[-1].minor.yy70->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy70->a[0].pExpr) ){ - Expr *pRHS = yymsp[-1].minor.yy70->a[0].pExpr; - yymsp[-1].minor.yy70->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy70); - pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy404, pRHS); - if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); - }else{ - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0); - if( yymsp[-4].minor.yy404 ){ - yymsp[-4].minor.yy404->x.pList = yymsp[-1].minor.yy70; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy404); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy528); + yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy394 ? "1" : "0"); + }else{ + Expr *pRHS = yymsp[-1].minor.yy322->a[0].pExpr; + if( yymsp[-1].minor.yy322->nExpr==1 && sqlite3ExprIsConstant(pRHS) && yymsp[-4].minor.yy528->op!=TK_VECTOR ){ + yymsp[-1].minor.yy322->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); + pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy70); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); + if( yymsp[-4].minor.yy528==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); + }else if( yymsp[-4].minor.yy528->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy528->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy322); + if( pSelectRHS ){ + parserDoubleLinkSelect(pParse, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelectRHS); + } + }else{ + yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy322; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); + } } - if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); + if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } } break; - case 216: /* expr ::= LP select RP */ + case 219: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy404 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy404, yymsp[-1].minor.yy81); + yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); } break; - case 217: /* expr ::= expr in_op LP select RP */ + case 220: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy404, yymsp[-1].minor.yy81); - if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); + if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 218: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 221: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 
0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy70 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy70); - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy404, pSelect); - if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); + if( yymsp[0].minor.yy322 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy322); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelect); + if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 219: /* expr ::= EXISTS LP select RP */ + case 222: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy404 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy81); + p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); } break; - case 220: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 223: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy404, 0); - if( yymsp[-4].minor.yy404 ){ - yymsp[-4].minor.yy404->x.pList = yymsp[-1].minor.yy404 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404) : yymsp[-2].minor.yy70; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy404); + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); + if( yymsp[-4].minor.yy528 ){ + yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy528 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528) : yymsp[-2].minor.yy322; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy70); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy404); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy322); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); } } break; - case 221: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 224: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, yymsp[-2].minor.yy404); - yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, yymsp[0].minor.yy404); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); + yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); } break; - case 222: /* case_exprlist ::= WHEN expr THEN expr */ + case 225: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy70 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404); - yymsp[-3].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy70, yymsp[0].minor.yy404); + yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); + yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); } break; - case 225: /* case_operand ::= expr */ -{yymsp[0].minor.yy404 = yymsp[0].minor.yy404; /*A-overwrites-X*/} + case 228: /* case_operand ::= expr */ +{yymsp[0].minor.yy528 = yymsp[0].minor.yy528; /*A-overwrites-X*/} break; - case 228: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy70 = 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy70,yymsp[0].minor.yy404);} + case 231: /* nexprlist ::= nexprlist COMMA expr */ +{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);} break; - case 229: /* nexprlist ::= expr */ -{yymsp[0].minor.yy70 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy404); /*A-overwrites-Y*/} + case 232: /* nexprlist ::= expr */ +{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/} break; - case 231: /* paren_exprlist ::= LP exprlist RP */ - case 236: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==236); -{yymsp[-2].minor.yy70 = yymsp[-1].minor.yy70;} + case 234: /* paren_exprlist ::= LP exprlist RP */ + case 239: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==239); +{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;} break; - case 232: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 235: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy70, yymsp[-10].minor.yy376, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy404, SQLITE_SO_ASC, yymsp[-8].minor.yy376, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy528, SQLITE_SO_ASC, yymsp[-8].minor.yy394, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } } break; - case 233: /* uniqueflag ::= UNIQUE */ - case 275: /* raisetype ::= ABORT */ yytestcase(yyruleno==275); -{yymsp[0].minor.yy376 = OE_Abort;} + case 236: /* uniqueflag ::= UNIQUE */ + case 278: /* raisetype ::= ABORT */ yytestcase(yyruleno==278); +{yymsp[0].minor.yy394 = OE_Abort;} break; - case 234: /* uniqueflag ::= */ -{yymsp[1].minor.yy376 = OE_None;} + case 237: /* uniqueflag ::= */ +{yymsp[1].minor.yy394 = OE_None;} break; - case 237: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 240: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy70 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy70, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy376, yymsp[0].minor.yy376); + yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); } break; - case 238: /* eidlist ::= nm collate sortorder */ + case 241: /* eidlist ::= nm collate sortorder */ { - yymsp[-2].minor.yy70 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy376, yymsp[0].minor.yy376); /*A-overwrites-Y*/ + yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/ } break; - case 241: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy153, yymsp[-1].minor.yy376);} + case 244: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);} break; - case 242: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy404);} + case 245: /* cmd ::= VACUUM vinto */ +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);} break; - case 243: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy404);} + case 
246: /* cmd ::= VACUUM nm vinto */ +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);} break; - case 246: /* cmd ::= PRAGMA nm dbnm */ + case 249: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 247: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 250: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 248: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 251: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 249: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 252: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; - case 250: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ + case 253: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 253: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 256: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy157, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all); } break; - case 254: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 257: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy376, yymsp[-4].minor.yy262.a, yymsp[-4].minor.yy262.b, yymsp[-2].minor.yy153, yymsp[0].minor.yy404, yymsp[-10].minor.yy376, yymsp[-8].minor.yy376); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 255: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-X*/ } + case 258: /* trigger_time ::= BEFORE|AFTER */ +{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ } break; - case 256: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy376 = TK_INSTEAD;} + case 259: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy394 = TK_INSTEAD;} break; - case 257: /* trigger_time ::= */ -{ yymsp[1].minor.yy376 = TK_BEFORE; } + case 260: /* trigger_time ::= */ +{ yymsp[1].minor.yy394 = TK_BEFORE; } break; - case 258: /* trigger_event ::= DELETE|INSERT */ - case 259: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==259); -{yymsp[0].minor.yy262.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy262.b = 0;} + case 261: /* trigger_event ::= DELETE|INSERT */ + case 262: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==262); +{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;} break; - case 260: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy262.a = TK_UPDATE; yymsp[-2].minor.yy262.b = yymsp[0].minor.yy436;} + case 263: /* trigger_event ::= UPDATE OF idlist */ 
+{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;} break; - case 261: /* when_clause ::= */ - case 280: /* key_opt ::= */ yytestcase(yyruleno==280); -{ yymsp[1].minor.yy404 = 0; } + case 264: /* when_clause ::= */ + case 283: /* key_opt ::= */ yytestcase(yyruleno==283); +{ yymsp[1].minor.yy528 = 0; } break; - case 262: /* when_clause ::= WHEN expr */ - case 281: /* key_opt ::= KEY expr */ yytestcase(yyruleno==281); -{ yymsp[-1].minor.yy404 = yymsp[0].minor.yy404; } + case 265: /* when_clause ::= WHEN expr */ + case 284: /* key_opt ::= KEY expr */ yytestcase(yyruleno==284); +{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; } break; - case 263: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 266: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy157!=0 ); - yymsp[-2].minor.yy157->pLast->pNext = yymsp[-1].minor.yy157; - yymsp[-2].minor.yy157->pLast = yymsp[-1].minor.yy157; + assert( yymsp[-2].minor.yy33!=0 ); + yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33; + yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33; } break; - case 264: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 267: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy157!=0 ); - yymsp[-1].minor.yy157->pLast = yymsp[-1].minor.yy157; + assert( yymsp[-1].minor.yy33!=0 ); + yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33; } break; - case 265: /* trnm ::= nm DOT nm */ + case 268: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -162630,368 +166091,369 @@ static YYACTIONTYPE yy_reduce( "statements within triggers"); } break; - case 266: /* tridxby ::= INDEXED BY nm */ + case 269: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 267: /* tridxby ::= NOT INDEXED */ + case 270: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 268: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy157 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy153, yymsp[-3].minor.yy70, yymsp[-1].minor.yy404, yymsp[-7].minor.yy376, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy504);} - yymsp[-8].minor.yy157 = yylhsminor.yy157; + case 271: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ +{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);} + yymsp[-8].minor.yy33 = yylhsminor.yy33; break; - case 269: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + case 272: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy157 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy436,yymsp[-2].minor.yy81,yymsp[-6].minor.yy376,yymsp[-1].minor.yy190,yymsp[-7].minor.yy504,yymsp[0].minor.yy504);/*yylhsminor.yy157-overwrites-yymsp[-6].minor.yy376*/ + yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/ } - 
yymsp[-7].minor.yy157 = yylhsminor.yy157; + yymsp[-7].minor.yy33 = yylhsminor.yy33; break; - case 270: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy157 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy404, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy504);} - yymsp[-5].minor.yy157 = yylhsminor.yy157; + case 273: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ +{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);} + yymsp[-5].minor.yy33 = yylhsminor.yy33; break; - case 271: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy157 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy81, yymsp[-2].minor.yy504, yymsp[0].minor.yy504); /*yylhsminor.yy157-overwrites-yymsp[-1].minor.yy81*/} - yymsp[-2].minor.yy157 = yylhsminor.yy157; + case 274: /* trigger_cmd ::= scanpt select scanpt */ +{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/} + yymsp[-2].minor.yy33 = yylhsminor.yy33; break; - case 272: /* expr ::= RAISE LP IGNORE RP */ + case 275: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy404 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy404 ){ - yymsp[-3].minor.yy404->affExpr = OE_Ignore; + yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy528 ){ + yymsp[-3].minor.yy528->affExpr = OE_Ignore; } } break; - case 273: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 276: /* expr ::= RAISE LP raisetype COMMA nm RP */ { - yymsp[-5].minor.yy404 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy404 ) { - yymsp[-5].minor.yy404->affExpr = (char)yymsp[-3].minor.yy376; + yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); + if( yymsp[-5].minor.yy528 ) { + yymsp[-5].minor.yy528->affExpr = (char)yymsp[-3].minor.yy394; } } break; - case 274: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy376 = OE_Rollback;} + case 277: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy394 = OE_Rollback;} break; - case 276: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy376 = OE_Fail;} + case 279: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy394 = OE_Fail;} break; - case 277: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 280: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy153,yymsp[-1].minor.yy376); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394); } break; - case 278: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 281: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy404, yymsp[-1].minor.yy404, yymsp[0].minor.yy404); + sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528); } break; - case 279: /* cmd ::= DETACH database_kw_opt expr */ + case 282: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy404); + sqlite3Detach(pParse, yymsp[0].minor.yy528); } break; - case 282: /* cmd ::= REINDEX */ + case 285: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 283: /* cmd ::= REINDEX nm dbnm */ + case 286: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 284: /* cmd ::= ANALYZE */ + case 287: /* cmd ::= ANALYZE */ 
{sqlite3Analyze(pParse, 0, 0);} break; - case 285: /* cmd ::= ANALYZE nm dbnm */ + case 288: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 286: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 289: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy153,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0); } break; - case 287: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 290: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 288: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + case 291: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy153, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0); } break; - case 289: /* add_column_fullname ::= fullname */ + case 292: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy153); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131); } break; - case 290: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 293: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy153, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 291: /* cmd ::= create_vtab */ + case 294: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 292: /* cmd ::= create_vtab LP vtabarglist RP */ + case 295: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 293: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 296: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy376); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394); } break; - case 294: /* vtabarg ::= */ + case 297: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 295: /* vtabargtoken ::= ANY */ - case 296: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==296); - case 297: /* lp ::= LP */ yytestcase(yyruleno==297); + case 298: /* vtabargtoken ::= ANY */ + case 299: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==299); + case 300: /* lp ::= LP */ yytestcase(yyruleno==300); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 298: /* with ::= WITH wqlist */ - case 299: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==299); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy103, 1); } + case 301: /* with ::= WITH wqlist */ + case 302: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==302); +{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); } break; - case 300: /* wqas ::= AS */ -{yymsp[0].minor.yy552 = M10d_Any;} + case 303: /* wqas ::= AS */ +{yymsp[0].minor.yy516 = M10d_Any;} break; - case 301: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy552 = M10d_Yes;} + 
case 304: /* wqas ::= AS MATERIALIZED */ +{yymsp[-1].minor.yy516 = M10d_Yes;} break; - case 302: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy552 = M10d_No;} + case 305: /* wqas ::= AS NOT MATERIALIZED */ +{yymsp[-2].minor.yy516 = M10d_No;} break; - case 303: /* wqitem ::= nm eidlist_opt wqas LP select RP */ + case 306: /* wqitem ::= nm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy329 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy70, yymsp[-1].minor.yy81, yymsp[-3].minor.yy552); /*A-overwrites-X*/ + yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/ } break; - case 304: /* wqlist ::= wqitem */ + case 307: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy103 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy329); /*A-overwrites-X*/ + yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/ } break; - case 305: /* wqlist ::= wqlist COMMA wqitem */ + case 308: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy103 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy103, yymsp[0].minor.yy329); + yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); } break; - case 306: /* windowdefn_list ::= windowdefn */ -{ yylhsminor.yy49 = yymsp[0].minor.yy49; } - yymsp[0].minor.yy49 = yylhsminor.yy49; + case 309: /* windowdefn_list ::= windowdefn */ +{ yylhsminor.yy41 = yymsp[0].minor.yy41; } + yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 307: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 310: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy49!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy49); - yymsp[0].minor.yy49->pNextWin = yymsp[-2].minor.yy49; - yylhsminor.yy49 = yymsp[0].minor.yy49; + assert( yymsp[0].minor.yy41!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); + yymsp[0].minor.yy41->pNextWin = yymsp[-2].minor.yy41; + yylhsminor.yy41 = yymsp[0].minor.yy41; } - yymsp[-2].minor.yy49 = yylhsminor.yy49; + yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 308: /* windowdefn ::= nm AS LP window RP */ + case 311: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy49) ){ - yymsp[-1].minor.yy49->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy41) ){ + yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy49 = yymsp[-1].minor.yy49; + yylhsminor.yy41 = yymsp[-1].minor.yy41; } - yymsp[-4].minor.yy49 = yylhsminor.yy49; + yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 309: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 312: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy70, yymsp[-1].minor.yy70, 0); + yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); } break; - case 310: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 313: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy70, yymsp[-1].minor.yy70, &yymsp[-5].minor.yy0); + yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 
yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy49 = yylhsminor.yy49; + yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 311: /* window ::= ORDER BY sortlist frame_opt */ + case 314: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, yymsp[-1].minor.yy70, 0); + yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); } break; - case 312: /* window ::= nm ORDER BY sortlist frame_opt */ + case 315: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, yymsp[-1].minor.yy70, &yymsp[-4].minor.yy0); + yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy49 = yylhsminor.yy49; + yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 313: /* window ::= frame_opt */ - case 332: /* filter_over ::= over_clause */ yytestcase(yyruleno==332); + case 316: /* window ::= frame_opt */ + case 335: /* filter_over ::= over_clause */ yytestcase(yyruleno==335); { - yylhsminor.yy49 = yymsp[0].minor.yy49; + yylhsminor.yy41 = yymsp[0].minor.yy41; } - yymsp[0].minor.yy49 = yylhsminor.yy49; + yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 314: /* window ::= nm frame_opt */ + case 317: /* window ::= nm frame_opt */ { - yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy49 = yylhsminor.yy49; + yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 315: /* frame_opt ::= */ + case 318: /* frame_opt ::= */ { - yymsp[1].minor.yy49 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 316: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy49 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy376, yymsp[-1].minor.yy117.eType, yymsp[-1].minor.yy117.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy552); + yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); } - yymsp[-2].minor.yy49 = yylhsminor.yy49; + yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 317: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 320: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy49 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy376, yymsp[-3].minor.yy117.eType, yymsp[-3].minor.yy117.pExpr, yymsp[-1].minor.yy117.eType, yymsp[-1].minor.yy117.pExpr, yymsp[0].minor.yy552); + yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); } - yymsp[-5].minor.yy49 = yylhsminor.yy49; + yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 319: /* frame_bound_s ::= frame_bound */ - case 321: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==321); -{yylhsminor.yy117 = yymsp[0].minor.yy117;} - yymsp[0].minor.yy117 = yylhsminor.yy117; + case 322: /* frame_bound_s ::= frame_bound */ + case 
324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); +{yylhsminor.yy595 = yymsp[0].minor.yy595;} + yymsp[0].minor.yy595 = yylhsminor.yy595; break; - case 320: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 322: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==322); - case 324: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==324); -{yylhsminor.yy117.eType = yymsp[-1].major; yylhsminor.yy117.pExpr = 0;} - yymsp[-1].minor.yy117 = yylhsminor.yy117; + case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); + case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); +{yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} + yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 323: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy117.eType = yymsp[0].major; yylhsminor.yy117.pExpr = yymsp[-1].minor.yy404;} - yymsp[-1].minor.yy117 = yylhsminor.yy117; + case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ +{yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} + yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 325: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy552 = 0;} + case 328: /* frame_exclude_opt ::= */ +{yymsp[1].minor.yy516 = 0;} break; - case 326: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy552 = yymsp[0].minor.yy552;} + case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ +{yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} break; - case 327: /* frame_exclude ::= NO OTHERS */ - case 328: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==328); -{yymsp[-1].minor.yy552 = yymsp[-1].major; /*A-overwrites-X*/} + case 330: /* frame_exclude ::= NO OTHERS */ + case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); +{yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 329: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy552 = yymsp[0].major; /*A-overwrites-X*/} + case 332: /* frame_exclude ::= GROUP|TIES */ +{yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} break; - case 330: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy49 = yymsp[0].minor.yy49; } + case 333: /* window_clause ::= WINDOW windowdefn_list */ +{ yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } break; - case 331: /* filter_over ::= filter_clause over_clause */ + case 334: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy49 ){ - yymsp[0].minor.yy49->pFilter = yymsp[-1].minor.yy404; + if( yymsp[0].minor.yy41 ){ + yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy404); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); } - yylhsminor.yy49 = yymsp[0].minor.yy49; + yylhsminor.yy41 = yymsp[0].minor.yy41; } - yymsp[-1].minor.yy49 = yylhsminor.yy49; + yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 333: /* filter_over ::= filter_clause */ + case 336: /* filter_over ::= filter_clause */ { - yylhsminor.yy49 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy49 ){ - yylhsminor.yy49->eFrmType = TK_FILTER; - yylhsminor.yy49->pFilter = yymsp[0].minor.yy404; + yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy41 ){ + yylhsminor.yy41->eFrmType = TK_FILTER; + yylhsminor.yy41->pFilter = yymsp[0].minor.yy528; }else{ - sqlite3ExprDelete(pParse->db, 
yymsp[0].minor.yy404); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy528); } } - yymsp[0].minor.yy49 = yylhsminor.yy49; + yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 334: /* over_clause ::= OVER LP window RP */ + case 337: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy49 = yymsp[-1].minor.yy49; - assert( yymsp[-3].minor.yy49!=0 ); + yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; + assert( yymsp[-3].minor.yy41!=0 ); } break; - case 335: /* over_clause ::= OVER nm */ + case 338: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy49 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy49 ){ - yymsp[-1].minor.yy49->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy41 ){ + yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; - case 336: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy404 = yymsp[-1].minor.yy404; } + case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ +{ yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } break; default: - /* (337) input ::= cmdlist */ yytestcase(yyruleno==337); - /* (338) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==338); - /* (339) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=339); - /* (340) ecmd ::= SEMI */ yytestcase(yyruleno==340); - /* (341) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==341); - /* (342) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=342); - /* (343) trans_opt ::= */ yytestcase(yyruleno==343); - /* (344) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==344); - /* (345) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==345); - /* (346) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==346); - /* (347) savepoint_opt ::= */ yytestcase(yyruleno==347); - /* (348) cmd ::= create_table create_table_args */ yytestcase(yyruleno==348); - /* (349) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==349); - /* (350) columnlist ::= columnname carglist */ yytestcase(yyruleno==350); - /* (351) nm ::= ID|INDEXED */ yytestcase(yyruleno==351); - /* (352) nm ::= STRING */ yytestcase(yyruleno==352); - /* (353) nm ::= JOIN_KW */ yytestcase(yyruleno==353); - /* (354) typetoken ::= typename */ yytestcase(yyruleno==354); - /* (355) typename ::= ID|STRING */ yytestcase(yyruleno==355); - /* (356) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=356); - /* (357) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=357); - /* (358) carglist ::= carglist ccons */ yytestcase(yyruleno==358); - /* (359) carglist ::= */ yytestcase(yyruleno==359); - /* (360) ccons ::= NULL onconf */ yytestcase(yyruleno==360); - /* (361) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==361); - /* (362) ccons ::= AS generated */ yytestcase(yyruleno==362); - /* (363) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==363); - /* (364) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==364); - /* (365) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=365); - /* (366) tconscomma ::= */ yytestcase(yyruleno==366); - /* (367) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=367); - /* (368) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=368); - /* (369) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=369); - /* (370) oneselect ::= values */ 
yytestcase(yyruleno==370); - /* (371) sclp ::= selcollist COMMA */ yytestcase(yyruleno==371); - /* (372) as ::= ID|STRING */ yytestcase(yyruleno==372); - /* (373) returning ::= */ yytestcase(yyruleno==373); - /* (374) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=374); - /* (375) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==375); - /* (376) exprlist ::= nexprlist */ yytestcase(yyruleno==376); - /* (377) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=377); - /* (378) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=378); - /* (379) nmnum ::= ON */ yytestcase(yyruleno==379); - /* (380) nmnum ::= DELETE */ yytestcase(yyruleno==380); - /* (381) nmnum ::= DEFAULT */ yytestcase(yyruleno==381); - /* (382) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==382); - /* (383) foreach_clause ::= */ yytestcase(yyruleno==383); - /* (384) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==384); - /* (385) trnm ::= nm */ yytestcase(yyruleno==385); - /* (386) tridxby ::= */ yytestcase(yyruleno==386); - /* (387) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==387); - /* (388) database_kw_opt ::= */ yytestcase(yyruleno==388); - /* (389) kwcolumn_opt ::= */ yytestcase(yyruleno==389); - /* (390) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==390); - /* (391) vtabarglist ::= vtabarg */ yytestcase(yyruleno==391); - /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==392); - /* (393) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==393); - /* (394) anylist ::= */ yytestcase(yyruleno==394); - /* (395) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==395); - /* (396) anylist ::= anylist ANY */ yytestcase(yyruleno==396); - /* (397) with ::= */ yytestcase(yyruleno==397); + /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); + /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); + /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); + /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); + /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); + /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); + /* (346) trans_opt ::= */ yytestcase(yyruleno==346); + /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); + /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); + /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); + /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); + /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); + /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352); + /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); + /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); + /* (355) nm ::= ID|INDEXED */ yytestcase(yyruleno==355); + /* (356) nm ::= STRING */ yytestcase(yyruleno==356); + /* (357) nm ::= JOIN_KW */ yytestcase(yyruleno==357); + /* (358) typetoken ::= typename */ yytestcase(yyruleno==358); + /* (359) typename ::= ID|STRING */ yytestcase(yyruleno==359); + /* (360) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); + /* (361) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=361); + /* (362) carglist ::= carglist ccons */ yytestcase(yyruleno==362); + /* (363) carglist ::= */ yytestcase(yyruleno==363); + /* (364) ccons ::= NULL onconf */ yytestcase(yyruleno==364); + /* (365) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==365); + /* (366) ccons ::= AS generated */ 
yytestcase(yyruleno==366); + /* (367) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==367); + /* (368) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==368); + /* (369) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=369); + /* (370) tconscomma ::= */ yytestcase(yyruleno==370); + /* (371) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=371); + /* (372) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=373); + /* (374) oneselect ::= values */ yytestcase(yyruleno==374); + /* (375) sclp ::= selcollist COMMA */ yytestcase(yyruleno==375); + /* (376) as ::= ID|STRING */ yytestcase(yyruleno==376); + /* (377) returning ::= */ yytestcase(yyruleno==377); + /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); + /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); + /* (380) exprlist ::= nexprlist */ yytestcase(yyruleno==380); + /* (381) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=381); + /* (382) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) nmnum ::= ON */ yytestcase(yyruleno==383); + /* (384) nmnum ::= DELETE */ yytestcase(yyruleno==384); + /* (385) nmnum ::= DEFAULT */ yytestcase(yyruleno==385); + /* (386) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==386); + /* (387) foreach_clause ::= */ yytestcase(yyruleno==387); + /* (388) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==388); + /* (389) trnm ::= nm */ yytestcase(yyruleno==389); + /* (390) tridxby ::= */ yytestcase(yyruleno==390); + /* (391) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==391); + /* (392) database_kw_opt ::= */ yytestcase(yyruleno==392); + /* (393) kwcolumn_opt ::= */ yytestcase(yyruleno==393); + /* (394) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==394); + /* (395) vtabarglist ::= vtabarg */ yytestcase(yyruleno==395); + /* (396) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==396); + /* (397) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==397); + /* (398) anylist ::= */ yytestcase(yyruleno==398); + /* (399) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==399); + /* (400) anylist ::= anylist ANY */ yytestcase(yyruleno==400); + /* (401) with ::= */ yytestcase(yyruleno==401); break; /********** End reduce actions ************************************************/ }; @@ -163149,8 +166611,8 @@ SQLITE_PRIVATE void sqlite3Parser( yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */ - assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); #ifndef NDEBUG + assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); if( yyTraceFILE ){ int yysize = yyRuleInfoNRhs[yyruleno]; if( yysize ){ @@ -163248,14 +166710,13 @@ SQLITE_PRIVATE void sqlite3Parser( yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); yymajor = YYNOCODE; }else{ - while( yypParser->yytos >= yypParser->yystack - && (yyact = yy_find_reduce_action( - yypParser->yytos->stateno, - YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE - ){ + while( yypParser->yytos > yypParser->yystack ){ + yyact = yy_find_reduce_action(yypParser->yytos->stateno, + YYERRORSYMBOL); + if( yyact<=YY_MAX_SHIFTREDUCE ) break; yy_pop_parser_stack(yypParser); } - if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ + if( yypParser->yytos <= yypParser->yystack || yymajor==0 ){ 
yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); #ifndef YYNOERRORRECOVERY @@ -164115,6 +167576,9 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ return i; + }else if( z[1]=='>' ){ + *tokenType = TK_PTR; + return 2 + (z[2]=='>'); } *tokenType = TK_MINUS; return 1; @@ -164384,13 +167848,9 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ } /* -** Run the parser on the given SQL string. The parser structure is -** passed in. An SQLITE_ status code is returned. If an error occurs -** then an and attempt is made to write an error message into -** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that -** error message. +** Run the parser on the given SQL string. */ -SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){ +SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ int nErr = 0; /* Number of errors encountered */ void *pEngine; /* The LEMON-generated LALR(1) parser */ int n = 0; /* Length of the next token token */ @@ -164398,6 +167858,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr int lastTokenParsed = -1; /* type of the previous token */ sqlite3 *db = pParse->db; /* The database connection */ int mxSqlLen; /* Max length of an SQL string */ + Parse *pParentParse = 0; /* Outer parse context, if any */ #ifdef sqlite3Parser_ENGINEALWAYSONSTACK yyParser sEngine; /* Space to hold the Lemon-generated Parser object */ #endif @@ -164410,7 +167871,6 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr } pParse->rc = SQLITE_OK; pParse->zTail = zSql; - assert( pzErrMsg!=0 ); #ifdef SQLITE_DEBUG if( db->flags & SQLITE_ParserTrace ){ printf("parser: [[[%s]]]\n", zSql); @@ -164433,7 +167893,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr assert( pParse->pNewTrigger==0 ); assert( pParse->nVar==0 ); assert( pParse->pVList==0 ); - pParse->pParentParse = db->pParse; + pParentParse = db->pParse; db->pParse = pParse; while( 1 ){ n = sqlite3GetToken((u8*)zSql, &tokenType); @@ -164453,6 +167913,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ pParse->rc = SQLITE_INTERRUPT; + pParse->nErr++; break; } if( tokenType==TK_SPACE ){ @@ -164482,7 +167943,10 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ }else{ - sqlite3ErrorMsg(pParse, "unrecognized token: \"%.*s\"", n, zSql); + Token x; + x.z = zSql; + x.n = n; + sqlite3ErrorMsg(pParse, "unrecognized token: \"%T\"", &x); break; } } @@ -164510,46 +167974,30 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM_BKPT; } - if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){ - pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); - } - assert( pzErrMsg!=0 ); - if( pParse->zErrMsg ){ - *pzErrMsg = pParse->zErrMsg; - sqlite3_log(pParse->rc, "%s in \"%s\"", - *pzErrMsg, pParse->zTail); - pParse->zErrMsg = 0; + if( pParse->zErrMsg || (pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE) ){ + if( pParse->zErrMsg==0 ){ + pParse->zErrMsg = 
sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); + } + sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); nErr++; } pParse->zTail = zSql; - if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){ - sqlite3VdbeDelete(pParse->pVdbe); - pParse->pVdbe = 0; - } -#ifndef SQLITE_OMIT_SHARED_CACHE - if( pParse->nested==0 ){ - sqlite3DbFree(db, pParse->aTableLock); - pParse->aTableLock = 0; - pParse->nTableLock = 0; - } -#endif #ifndef SQLITE_OMIT_VIRTUALTABLE sqlite3_free(pParse->apVtabLock); #endif - if( !IN_SPECIAL_PARSE ){ + if( pParse->pNewTable && !IN_SPECIAL_PARSE ){ /* If the pParse->declareVtab flag is set, do not delete any table ** structure built up in pParse->pNewTable. The calling code (see vtab.c) ** will take responsibility for freeing the Table structure. */ sqlite3DeleteTable(db, pParse->pNewTable); } - if( !IN_RENAME_OBJECT ){ + if( pParse->pNewTrigger && !IN_RENAME_OBJECT ){ sqlite3DeleteTrigger(db, pParse->pNewTrigger); } sqlite3DbFree(db, pParse->pVList); - db->pParse = pParse->pParentParse; - pParse->pParentParse = 0; + db->pParse = pParentParse; assert( nErr==0 || pParse->rc!=SQLITE_OK ); return nErr; } @@ -165130,9 +168578,6 @@ SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*); #ifdef SQLITE_ENABLE_FTS5 SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*); #endif -#ifdef SQLITE_ENABLE_JSON1 -SQLITE_PRIVATE int sqlite3Json1Init(sqlite3*); -#endif #ifdef SQLITE_ENABLE_STMTVTAB SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*); #endif @@ -165167,8 +168612,8 @@ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = { sqlite3DbstatRegister, #endif sqlite3TestExtInit, -#ifdef SQLITE_ENABLE_JSON1 - sqlite3Json1Init, +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) + sqlite3JsonTableFunctions, #endif #ifdef SQLITE_ENABLE_STMTVTAB sqlite3StmtVtabInit, @@ -166166,7 +169611,7 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3 *db, sqlite3_int64 iRowid) /* ** Return the number of changes in the most recent call to sqlite3_exec(). */ -SQLITE_API int sqlite3_changes(sqlite3 *db){ +SQLITE_API sqlite3_int64 sqlite3_changes64(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; @@ -166175,11 +169620,14 @@ SQLITE_API int sqlite3_changes(sqlite3 *db){ #endif return db->nChange; } +SQLITE_API int sqlite3_changes(sqlite3 *db){ + return (int)sqlite3_changes64(db); +} /* ** Return the number of changes since the database handle was opened. */ -SQLITE_API int sqlite3_total_changes(sqlite3 *db){ +SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; @@ -166188,6 +169636,9 @@ SQLITE_API int sqlite3_total_changes(sqlite3 *db){ #endif return db->nTotalChange; } +SQLITE_API int sqlite3_total_changes(sqlite3 *db){ + return (int)sqlite3_total_changes64(db); +} /* ** Close all open savepoints. This function only manipulates fields of the @@ -166212,7 +169663,9 @@ SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *db){ ** with SQLITE_ANY as the encoding. */ static void functionDestroy(sqlite3 *db, FuncDef *p){ - FuncDestructor *pDestructor = p->u.pDestructor; + FuncDestructor *pDestructor; + assert( (p->funcFlags & SQLITE_FUNC_BUILTIN)==0 ); + pDestructor = p->u.pDestructor; if( pDestructor ){ pDestructor->nRef--; if( pDestructor->nRef==0 ){ @@ -166316,7 +169769,7 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){ /* Convert the connection into a zombie and then close it. 
*/ - db->magic = SQLITE_MAGIC_ZOMBIE; + db->eOpenState = SQLITE_STATE_ZOMBIE; sqlite3LeaveMutexAndCloseZombie(db); return SQLITE_OK; } @@ -166380,7 +169833,7 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ ** or if the connection has not yet been closed by sqlite3_close_v2(), ** then just leave the mutex and return. */ - if( db->magic!=SQLITE_MAGIC_ZOMBIE || connectionIsBusy(db) ){ + if( db->eOpenState!=SQLITE_STATE_ZOMBIE || connectionIsBusy(db) ){ sqlite3_mutex_leave(db->mutex); return; } @@ -166466,7 +169919,7 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ sqlite3_free(db->auth.zAuthPW); #endif - db->magic = SQLITE_MAGIC_ERROR; + db->eOpenState = SQLITE_STATE_ERROR; /* The temp-database schema is allocated differently from the other schema ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()). @@ -166475,8 +169928,11 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ ** structure? */ sqlite3DbFree(db, db->aDb[1].pSchema); + if( db->xAutovacDestr ){ + db->xAutovacDestr(db->pAutovacPagesArg); + } sqlite3_mutex_leave(db->mutex); - db->magic = SQLITE_MAGIC_CLOSED; + db->eOpenState = SQLITE_STATE_CLOSED; sqlite3_mutex_free(db->mutex); assert( sqlite3LookasideUsed(db,0)==0 ); if( db->lookaside.bMalloced ){ @@ -166529,7 +169985,7 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){ /* Any deferred constraint violations have now been resolved. */ db->nDeferredCons = 0; db->nDeferredImmCons = 0; - db->flags &= ~(u64)SQLITE_DeferFKs; + db->flags &= ~(u64)(SQLITE_DeferFKs|SQLITE_CorruptRdOnly); /* If one has been configured, invoke the rollback-hook callback */ if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){ @@ -166864,7 +170320,7 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ */ SQLITE_API void sqlite3_interrupt(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) && (db==0 || db->magic!=SQLITE_MAGIC_ZOMBIE) ){ + if( !sqlite3SafetyCheckOk(db) && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) ){ (void)SQLITE_MISUSE_BKPT; return; } @@ -166893,7 +170349,6 @@ SQLITE_PRIVATE int sqlite3CreateFunc( FuncDestructor *pDestructor ){ FuncDef *p; - int nName; int extraFlags; assert( sqlite3_mutex_held(db->mutex) ); @@ -166903,7 +170358,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc( || ((xFinal==0)!=(xStep==0)) /* Both or neither of xFinal and xStep */ || ((xValue==0)!=(xInverse==0)) /* Both or neither of xValue, xInverse */ || (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) - || (255<(nName = sqlite3Strlen30( zFunctionName))) + || (255nRef==0 ){ - assert( rc!=SQLITE_OK ); + assert( rc!=SQLITE_OK || (xStep==0 && xFinal==0) ); xDestroy(p); sqlite3_free(pArg); } @@ -167366,6 +170832,34 @@ SQLITE_API void *sqlite3_preupdate_hook( } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ +/* +** Register a function to be invoked prior to each autovacuum that +** determines the number of pages to vacuum. 
+*/ +SQLITE_API int sqlite3_autovacuum_pages( + sqlite3 *db, /* Attach the hook to this database */ + unsigned int (*xCallback)(void*,const char*,u32,u32,u32), + void *pArg, /* Argument to the function */ + void (*xDestructor)(void*) /* Destructor for pArg */ +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + if( xDestructor ) xDestructor(pArg); + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + if( db->xAutovacDestr ){ + db->xAutovacDestr(db->pAutovacPagesArg); + } + db->xAutovacPages = xCallback; + db->pAutovacPagesArg = pArg; + db->xAutovacDestr = xDestructor; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + + #ifndef SQLITE_OMIT_WAL /* ** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). @@ -167628,6 +171122,19 @@ SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){ return z; } +/* +** Return the byte offset of the most recent error +*/ +SQLITE_API int sqlite3_error_offset(sqlite3 *db){ + int iOffset = -1; + if( db && sqlite3SafetyCheckSickOrOk(db) && db->errCode ){ + sqlite3_mutex_enter(db->mutex); + iOffset = db->errByteOffset; + sqlite3_mutex_leave(db->mutex); + } + return iOffset; +} + #ifndef SQLITE_OMIT_UTF16 /* ** Return UTF-16 encoded English language explanation of the most recent @@ -167888,6 +171395,8 @@ SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ if( newLimit>=0 ){ /* IMP: R-52476-28732 */ if( newLimit>aHardLimit[limitId] ){ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ + }else if( newLimit<1 && limitId==SQLITE_LIMIT_LENGTH ){ + newLimit = 1; } db->aLimit[limitId] = newLimit; } @@ -168159,7 +171668,7 @@ SQLITE_PRIVATE int sqlite3ParseUri( */ static const char *uriParameter(const char *zFilename, const char *zParam){ zFilename += sqlite3Strlen30(zFilename) + 1; - while( zFilename[0] ){ + while( ALWAYS(zFilename!=0) && zFilename[0] ){ int x = strcmp(zFilename, zParam); zFilename += sqlite3Strlen30(zFilename) + 1; if( x==0 ) return zFilename; @@ -168219,8 +171728,8 @@ static int openDatabase( ** dealt with in the previous code block. Besides these, the only ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY, ** SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_SHAREDCACHE, - ** SQLITE_OPEN_PRIVATECACHE, and some reserved bits. Silently mask - ** off all other flags. + ** SQLITE_OPEN_PRIVATECACHE, SQLITE_OPEN_EXRESCODE, and some reserved + ** bits. Silently mask off all other flags. */ flags &= ~( SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_EXCLUSIVE | @@ -168255,9 +171764,9 @@ static int openDatabase( } } sqlite3_mutex_enter(db->mutex); - db->errMask = 0xff; + db->errMask = (flags & SQLITE_OPEN_EXRESCODE)!=0 ? 0xffffffff : 0xff; db->nDb = 2; - db->magic = SQLITE_MAGIC_BUSY; + db->eOpenState = SQLITE_STATE_BUSY; db->aDb = db->aDbStatic; db->lookaside.bDisable = 1; db->lookaside.sz = 0; @@ -168269,7 +171778,15 @@ static int openDatabase( db->nextAutovac = -1; db->szMmap = sqlite3GlobalConfig.szMmap; db->nextPagesize = 0; + db->init.azInit = sqlite3StdType; /* Any array of string ptrs will do */ +#ifdef SQLITE_ENABLE_SORTER_MMAP + /* Beginning with version 3.37.0, using the VFS xFetch() API to memory-map + ** the temporary files used to do external sorts (see code in vdbesort.c) + ** is disabled. It can still be used either by defining + ** SQLITE_ENABLE_SORTER_MMAP at compile time or by using the + ** SQLITE_TESTCTRL_SORTER_MMAP test-control at runtime. 
*/ db->nMaxSorterMmap = 0x7FFFFFFF; +#endif db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_EnableView @@ -168417,7 +171934,7 @@ static int openDatabase( db->aDb[1].zDbSName = "temp"; db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF; - db->magic = SQLITE_MAGIC_OPEN; + db->eOpenState = SQLITE_STATE_OPEN; if( db->mallocFailed ){ goto opendb_out; } @@ -168479,12 +171996,12 @@ static int openDatabase( sqlite3_mutex_leave(db->mutex); } rc = sqlite3_errcode(db); - assert( db!=0 || rc==SQLITE_NOMEM ); - if( rc==SQLITE_NOMEM ){ + assert( db!=0 || (rc&0xff)==SQLITE_NOMEM ); + if( (rc&0xff)==SQLITE_NOMEM ){ sqlite3_close(db); db = 0; }else if( rc!=SQLITE_OK ){ - db->magic = SQLITE_MAGIC_SICK; + db->eOpenState = SQLITE_STATE_SICK; } *ppDb = db; #ifdef SQLITE_ENABLE_SQLLOG @@ -168495,7 +172012,7 @@ static int openDatabase( } #endif sqlite3_free_filename(zOpen); - return rc & 0xff; + return rc; } @@ -168795,7 +172312,7 @@ SQLITE_API int sqlite3_table_column_metadata( /* Locate the table in question */ pTab = sqlite3FindTable(db, zTableName, zDbName); - if( !pTab || pTab->pSelect ){ + if( !pTab || IsView(pTab) ){ pTab = 0; goto error_out; } @@ -168806,7 +172323,7 @@ SQLITE_API int sqlite3_table_column_metadata( }else{ for(iCol=0; iColnCol; iCol++){ pCol = &pTab->aCol[iCol]; - if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){ + if( 0==sqlite3StrICmp(pCol->zCnName, zColumnName) ){ break; } } @@ -168833,7 +172350,7 @@ SQLITE_API int sqlite3_table_column_metadata( */ if( pCol ){ zDataType = sqlite3ColumnType(pCol,0); - zCollSeq = pCol->zColl; + zCollSeq = sqlite3ColumnColl(pCol); notnull = pCol->notNull!=0; primarykey = (pCol->colFlags & COLFLAG_PRIMKEY)!=0; autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0; @@ -169040,12 +172557,16 @@ SQLITE_API int sqlite3_test_control(int op, ...){ ** sqlite3_test_control(). */ case SQLITE_TESTCTRL_FAULT_INSTALL: { - /* MSVC is picky about pulling func ptrs from va lists. - ** http://support.microsoft.com/kb/47961 + /* A bug in MSVC prevents it from understanding pointers to functions + ** types in the second argument to va_arg(). Work around the problem + ** using a typedef. + ** http://support.microsoft.com/kb/47961 <-- dead hyperlink + ** Search at http://web.archive.org/ to find the 2015-03-16 archive + ** of the link above to see the original text. ** sqlite3GlobalConfig.xTestCallback = va_arg(ap, int(*)(int)); */ - typedef int(*TESTCALLBACKFUNC_t)(int); - sqlite3GlobalConfig.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t); + typedef int(*sqlite3FaultFuncType)(int); + sqlite3GlobalConfig.xTestCallback = va_arg(ap, sqlite3FaultFuncType); rc = sqlite3FaultSim(0); break; } @@ -169172,13 +172693,27 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } - /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, int onoff); + /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, onoff, xAlt); ** - ** If parameter onoff is non-zero, subsequent calls to localtime() - ** and its variants fail. If onoff is zero, undo this setting. + ** If parameter onoff is 1, subsequent calls to localtime() fail. + ** If 2, then invoke xAlt() instead of localtime(). If 0, normal + ** processing. + ** + ** xAlt arguments are void pointers, but they really want to be: + ** + ** int xAlt(const time_t*, struct tm*); + ** + ** xAlt should write results in to struct tm object of its 2nd argument + ** and return zero on success, or return non-zero on failure. 
*/ case SQLITE_TESTCTRL_LOCALTIME_FAULT: { sqlite3GlobalConfig.bLocaltimeFault = va_arg(ap, int); + if( sqlite3GlobalConfig.bLocaltimeFault==2 ){ + typedef int(*sqlite3LocaltimeType)(const void*,void*); + sqlite3GlobalConfig.xAltLocaltime = va_arg(ap, sqlite3LocaltimeType); + }else{ + sqlite3GlobalConfig.xAltLocaltime = 0; + } break; } @@ -169283,12 +172818,16 @@ SQLITE_API int sqlite3_test_control(int op, ...){ */ case SQLITE_TESTCTRL_IMPOSTER: { sqlite3 *db = va_arg(ap, sqlite3*); + int iDb; sqlite3_mutex_enter(db->mutex); - db->init.iDb = sqlite3FindDbName(db, va_arg(ap,const char*)); - db->init.busy = db->init.imposterTable = va_arg(ap,int); - db->init.newTnum = va_arg(ap,int); - if( db->init.busy==0 && db->init.newTnum>0 ){ - sqlite3ResetAllSchemasOfConnection(db); + iDb = sqlite3FindDbName(db, va_arg(ap,const char*)); + if( iDb>=0 ){ + db->init.iDb = iDb; + db->init.busy = db->init.imposterTable = va_arg(ap,int); + db->init.newTnum = va_arg(ap,int); + if( db->init.busy==0 && db->init.newTnum>0 ){ + sqlite3ResetAllSchemasOfConnection(db); + } } sqlite3_mutex_leave(db->mutex); break; @@ -169364,6 +172903,26 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } + /* sqlite3_test_control(SQLITE_TESTCTRL_LOGEST, + ** double fIn, // Input value + ** int *pLogEst, // sqlite3LogEstFromDouble(fIn) + ** u64 *pInt, // sqlite3LogEstToInt(*pLogEst) + ** int *pLogEst2 // sqlite3LogEst(*pInt) + ** ); + ** + ** Test access for the LogEst conversion routines. + */ + case SQLITE_TESTCTRL_LOGEST: { + double rIn = va_arg(ap, double); + LogEst rLogEst = sqlite3LogEstFromDouble(rIn); + u64 iInt = sqlite3LogEstToInt(rLogEst); + va_arg(ap, int*)[0] = rLogEst; + va_arg(ap, u64*)[0] = iInt; + va_arg(ap, int*)[0] = sqlite3LogEst(iInt); + break; + } + + #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) ** @@ -169500,7 +173059,7 @@ SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N){ if( zFilename==0 || N<0 ) return 0; zFilename = databaseName(zFilename); zFilename += sqlite3Strlen30(zFilename) + 1; - while( zFilename[0] && (N--)>0 ){ + while( ALWAYS(zFilename) && zFilename[0] && (N--)>0 ){ zFilename += sqlite3Strlen30(zFilename) + 1; zFilename += sqlite3Strlen30(zFilename) + 1; } @@ -169543,12 +173102,14 @@ SQLITE_API sqlite3_int64 sqlite3_uri_int64( ** corruption. */ SQLITE_API const char *sqlite3_filename_database(const char *zFilename){ + if( zFilename==0 ) return 0; return databaseName(zFilename); } SQLITE_API const char *sqlite3_filename_journal(const char *zFilename){ + if( zFilename==0 ) return 0; zFilename = databaseName(zFilename); zFilename += sqlite3Strlen30(zFilename) + 1; - while( zFilename[0] ){ + while( ALWAYS(zFilename) && zFilename[0] ){ zFilename += sqlite3Strlen30(zFilename) + 1; zFilename += sqlite3Strlen30(zFilename) + 1; } @@ -169559,7 +173120,7 @@ SQLITE_API const char *sqlite3_filename_wal(const char *zFilename){ return 0; #else zFilename = sqlite3_filename_journal(zFilename); - zFilename += sqlite3Strlen30(zFilename) + 1; + if( zFilename ) zFilename += sqlite3Strlen30(zFilename) + 1; return zFilename; #endif } @@ -170852,17 +174413,18 @@ SQLITE_API extern int sqlite3_fts3_may_be_corrupt; ** Macros indicating that conditional expressions are always true or ** false. 
*/ -#ifdef SQLITE_COVERAGE_TEST -# define ALWAYS(x) (1) -# define NEVER(X) (0) -#elif defined(SQLITE_DEBUG) -# define ALWAYS(x) sqlite3Fts3Always((x)!=0) -# define NEVER(x) sqlite3Fts3Never((x)!=0) -SQLITE_PRIVATE int sqlite3Fts3Always(int b); -SQLITE_PRIVATE int sqlite3Fts3Never(int b); +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) +# define SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS 1 +#endif +#if defined(SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS) +# define ALWAYS(X) (1) +# define NEVER(X) (0) +#elif !defined(NDEBUG) +# define ALWAYS(X) ((X)?1:(assert(0),0)) +# define NEVER(X) ((X)?(assert(0),1):0) #else -# define ALWAYS(x) (x) -# define NEVER(x) (x) +# define ALWAYS(X) (X) +# define NEVER(X) (X) #endif /* @@ -171321,6 +174883,7 @@ SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *); SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db, Fts3Hash*); SQLITE_PRIVATE int sqlite3Fts3InitTerm(sqlite3 *db); #endif +SQLITE_PRIVATE void *sqlite3Fts3MallocZero(i64 nByte); SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(sqlite3_tokenizer *, int, const char *, int, sqlite3_tokenizer_cursor ** @@ -171340,7 +174903,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *) SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); /* fts3_tokenize_vtab.c */ -SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *); +SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *, void(*xDestroy)(void*)); /* fts3_unicode2.c (functions generated by parsing unicode text files) */ #ifndef SQLITE_DISABLE_FTS3_UNICODE @@ -171373,18 +174936,17 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); SQLITE_EXTENSION_INIT1 #endif +typedef struct Fts3HashWrapper Fts3HashWrapper; +struct Fts3HashWrapper { + Fts3Hash hash; /* Hash table */ + int nRef; /* Number of pointers to this object */ +}; + static int fts3EvalNext(Fts3Cursor *pCsr); static int fts3EvalStart(Fts3Cursor *pCsr); static int fts3TermSegReaderCursor( Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **); -#ifndef SQLITE_AMALGAMATION -# if defined(SQLITE_DEBUG) -SQLITE_PRIVATE int sqlite3Fts3Always(int b) { assert( b ); return b; } -SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; } -# endif -#endif - /* ** This variable is set to false when running tests for which the on disk ** structures should not be corrupt. Otherwise, true. If it is false, extra @@ -172244,7 +175806,7 @@ static int fts3InitVtab( sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ char **pzErr /* Write any error message here */ ){ - Fts3Hash *pHash = (Fts3Hash *)pAux; + Fts3Hash *pHash = &((Fts3HashWrapper*)pAux)->hash; Fts3Table *p = 0; /* Pointer to allocated vtab */ int rc = SQLITE_OK; /* Return code */ int i; /* Iterator variable */ @@ -175079,9 +178641,12 @@ static const sqlite3_module fts3Module = { ** allocated for the tokenizer hash table. 
*/ static void hashDestroy(void *p){ - Fts3Hash *pHash = (Fts3Hash *)p; - sqlite3Fts3HashClear(pHash); - sqlite3_free(pHash); + Fts3HashWrapper *pHash = (Fts3HashWrapper *)p; + pHash->nRef--; + if( pHash->nRef<=0 ){ + sqlite3Fts3HashClear(&pHash->hash); + sqlite3_free(pHash); + } } /* @@ -175111,7 +178676,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const */ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ int rc = SQLITE_OK; - Fts3Hash *pHash = 0; + Fts3HashWrapper *pHash = 0; const sqlite3_tokenizer_module *pSimple = 0; const sqlite3_tokenizer_module *pPorter = 0; #ifndef SQLITE_DISABLE_FTS3_UNICODE @@ -175139,23 +178704,24 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ sqlite3Fts3PorterTokenizerModule(&pPorter); /* Allocate and initialize the hash-table used to store tokenizers. */ - pHash = sqlite3_malloc(sizeof(Fts3Hash)); + pHash = sqlite3_malloc(sizeof(Fts3HashWrapper)); if( !pHash ){ rc = SQLITE_NOMEM; }else{ - sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); + sqlite3Fts3HashInit(&pHash->hash, FTS3_HASH_STRING, 1); + pHash->nRef = 0; } /* Load the built-in tokenizers into the hash table */ if( rc==SQLITE_OK ){ - if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) - || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) + if( sqlite3Fts3HashInsert(&pHash->hash, "simple", 7, (void *)pSimple) + || sqlite3Fts3HashInsert(&pHash->hash, "porter", 7, (void *)pPorter) #ifndef SQLITE_DISABLE_FTS3_UNICODE - || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode) + || sqlite3Fts3HashInsert(&pHash->hash, "unicode61", 10, (void *)pUnicode) #endif #ifdef SQLITE_ENABLE_ICU - || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) + || (pIcu && sqlite3Fts3HashInsert(&pHash->hash, "icu", 4, (void *)pIcu)) #endif ){ rc = SQLITE_NOMEM; @@ -175164,7 +178730,7 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ #ifdef SQLITE_TEST if( rc==SQLITE_OK ){ - rc = sqlite3Fts3ExprInitTestInterface(db, pHash); + rc = sqlite3Fts3ExprInitTestInterface(db, &pHash->hash); } #endif @@ -175173,23 +178739,26 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ ** module with sqlite. */ if( SQLITE_OK==rc - && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) + && SQLITE_OK==(rc=sqlite3Fts3InitHashTable(db,&pHash->hash,"fts3_tokenizer")) && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) ){ + pHash->nRef++; rc = sqlite3_create_module_v2( db, "fts3", &fts3Module, (void *)pHash, hashDestroy ); if( rc==SQLITE_OK ){ + pHash->nRef++; rc = sqlite3_create_module_v2( - db, "fts4", &fts3Module, (void *)pHash, 0 + db, "fts4", &fts3Module, (void *)pHash, hashDestroy ); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts3InitTok(db, (void *)pHash); + pHash->nRef++; + rc = sqlite3Fts3InitTok(db, (void *)pHash, hashDestroy); } return rc; } @@ -175198,7 +178767,7 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ /* An error has occurred. Delete the hash table and return the error code. 
*/ assert( rc!=SQLITE_OK ); if( pHash ){ - sqlite3Fts3HashClear(pHash); + sqlite3Fts3HashClear(&pHash->hash); sqlite3_free(pHash); } return rc; @@ -175545,7 +179114,7 @@ SQLITE_PRIVATE void sqlite3Fts3DoclistPrev( assert( nDoclist>0 ); assert( *pbEof==0 ); - assert( p || *piDocid==0 ); + assert_fts3_nc( p || *piDocid==0 ); assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) ); if( p==0 ){ @@ -176409,8 +179978,8 @@ static void fts3EvalNextRow( Fts3Expr *pRight = pExpr->pRight; sqlite3_int64 iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); - assert( pLeft->bStart || pLeft->iDocid==pRight->iDocid ); - assert( pRight->bStart || pLeft->iDocid==pRight->iDocid ); + assert_fts3_nc( pLeft->bStart || pLeft->iDocid==pRight->iDocid ); + assert_fts3_nc( pRight->bStart || pLeft->iDocid==pRight->iDocid ); if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ fts3EvalNextRow(pCsr, pLeft, pRc); @@ -177048,6 +180617,9 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist( if( bEofSave==0 && pNear->iDocid==iDocid ) break; } assert( rc!=SQLITE_OK || pPhrase->bIncr==0 ); + if( rc==SQLITE_OK && pNear->bEof!=bEofSave ){ + rc = FTS_CORRUPT_VTAB; + } } if( bTreeEof ){ while( rc==SQLITE_OK && !pNear->bEof ){ @@ -177470,6 +181042,7 @@ static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){ if( fts3auxGrowStatArray(pCsr, 2) ) return SQLITE_NOMEM; memset(pCsr->aStat, 0, sizeof(struct Fts3auxColstats) * pCsr->nStat); iCol = 0; + rc = SQLITE_OK; while( iaStat[iCol+1].nDoc++; eState = 2; @@ -177521,7 +181098,6 @@ static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){ } pCsr->iCol = 0; - rc = SQLITE_OK; }else{ pCsr->isEof = 1; } @@ -177850,7 +181426,7 @@ static int fts3isspace(char c){ ** zero the memory before returning a pointer to it. If unsuccessful, ** return NULL. */ -static void *fts3MallocZero(sqlite3_int64 nByte){ +SQLITE_PRIVATE void *sqlite3Fts3MallocZero(sqlite3_int64 nByte){ void *pRet = sqlite3_malloc64(nByte); if( pRet ) memset(pRet, 0, nByte); return pRet; @@ -177931,7 +181507,7 @@ static int getNextToken( rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; - pRet = (Fts3Expr *)fts3MallocZero(nByte); + pRet = (Fts3Expr *)sqlite3Fts3MallocZero(nByte); if( !pRet ){ rc = SQLITE_NOMEM; }else{ @@ -178186,7 +181762,7 @@ static int getNextNode( if( fts3isspace(cNext) || cNext=='"' || cNext=='(' || cNext==')' || cNext==0 ){ - pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr)); + pRet = (Fts3Expr *)sqlite3Fts3MallocZero(sizeof(Fts3Expr)); if( !pRet ){ return SQLITE_NOMEM; } @@ -178365,7 +181941,7 @@ static int fts3ExprParse( && p->eType==FTSQUERY_PHRASE && pParse->isNot ){ /* Create an implicit NOT operator. */ - Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr)); + Fts3Expr *pNot = sqlite3Fts3MallocZero(sizeof(Fts3Expr)); if( !pNot ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; @@ -178399,7 +181975,7 @@ static int fts3ExprParse( /* Insert an implicit AND operator. */ Fts3Expr *pAnd; assert( pRet && pPrev ); - pAnd = fts3MallocZero(sizeof(Fts3Expr)); + pAnd = sqlite3Fts3MallocZero(sizeof(Fts3Expr)); if( !pAnd ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; @@ -181255,7 +184831,7 @@ static int fts3tokRowidMethod( ** Register the fts3tok module with database connection db. Return SQLITE_OK ** if successful or an error code if sqlite3_create_module() fails. 
*/ -SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ +SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash, void(*xDestroy)(void*)){ static const sqlite3_module fts3tok_module = { 0, /* iVersion */ fts3tokConnectMethod, /* xCreate */ @@ -181284,7 +184860,9 @@ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ }; int rc; /* Return code */ - rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash); + rc = sqlite3_create_module_v2( + db, "fts3tokenize", &fts3tok_module, (void*)pHash, xDestroy + ); return rc; } @@ -182629,8 +186207,18 @@ static int fts3SegReaderNext( char *aCopy; PendingList *pList = (PendingList *)fts3HashData(pElem); int nCopy = pList->nData+1; - pReader->zTerm = (char *)fts3HashKey(pElem); - pReader->nTerm = fts3HashKeysize(pElem); + + int nTerm = fts3HashKeysize(pElem); + if( (nTerm+1)>pReader->nTermAlloc ){ + sqlite3_free(pReader->zTerm); + pReader->zTerm = (char*)sqlite3_malloc((nTerm+1)*2); + if( !pReader->zTerm ) return SQLITE_NOMEM; + pReader->nTermAlloc = (nTerm+1)*2; + } + memcpy(pReader->zTerm, fts3HashKey(pElem), nTerm); + pReader->zTerm[nTerm] = '\0'; + pReader->nTerm = nTerm; + aCopy = (char*)sqlite3_malloc(nCopy); if( !aCopy ) return SQLITE_NOMEM; memcpy(aCopy, pList->aData, nCopy); @@ -182883,9 +186471,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrOvfl( */ SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){ if( pReader ){ - if( !fts3SegReaderIsPending(pReader) ){ - sqlite3_free(pReader->zTerm); - } + sqlite3_free(pReader->zTerm); if( !fts3SegReaderIsRootOnly(pReader) ){ sqlite3_free(pReader->aNode); } @@ -185077,7 +188663,7 @@ static int nodeReaderNext(NodeReader *p){ return FTS_CORRUPT_VTAB; } blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc); - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(p->term.a!=0) ){ memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix); p->term.n = nPrefix+nSuffix; p->iOff += nSuffix; @@ -185471,7 +189057,11 @@ static int fts3TermCmp( int nCmp = MIN(nLhs, nRhs); int res; - res = (nCmp ? memcmp(zLhs, zRhs, nCmp) : 0); + if( nCmp && ALWAYS(zLhs) && ALWAYS(zRhs) ){ + res = memcmp(zLhs, zRhs, nCmp); + }else{ + res = 0; + } if( res==0 ) res = nLhs - nRhs; return res; @@ -186115,7 +189705,7 @@ static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){ if( aHint ){ blobGrowBuffer(pHint, nHint, &rc); if( rc==SQLITE_OK ){ - memcpy(pHint->a, aHint, nHint); + if( ALWAYS(pHint->a!=0) ) memcpy(pHint->a, aHint, nHint); pHint->n = nHint; } } @@ -187232,9 +190822,8 @@ static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ + sizeof(MatchinfoBuffer); sqlite3_int64 nStr = strlen(zMatchinfo); - pRet = sqlite3_malloc64(nByte + nStr+1); + pRet = sqlite3Fts3MallocZero(nByte + nStr+1); if( pRet ){ - memset(pRet, 0, nByte); pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + sizeof(u32)*((int)nElem+1); @@ -187638,11 +191227,10 @@ static int fts3BestSnippet( ** the required space using malloc(). */ nByte = sizeof(SnippetPhrase) * nList; - sIter.aPhrase = (SnippetPhrase *)sqlite3_malloc64(nByte); + sIter.aPhrase = (SnippetPhrase *)sqlite3Fts3MallocZero(nByte); if( !sIter.aPhrase ){ return SQLITE_NOMEM; } - memset(sIter.aPhrase, 0, nByte); /* Initialize the contents of the SnippetIter object. Then iterate through ** the set of phrases in the expression to populate the aPhrase[] array. @@ -188206,10 +191794,12 @@ static int fts3MatchinfoLcsCb( ** position list for the next column. 
*/ static int fts3LcsIteratorAdvance(LcsIterator *pIter){ - char *pRead = pIter->pRead; + char *pRead; sqlite3_int64 iRead; int rc = 0; + if( NEVER(pIter==0) ) return 1; + pRead = pIter->pRead; pRead += sqlite3Fts3GetVarint(pRead, &iRead); if( iRead==0 || iRead==1 ){ pRead = 0; @@ -188243,9 +191833,8 @@ static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){ /* Allocate and populate the array of LcsIterator objects. The array ** contains one element for each matchable phrase in the query. **/ - aIter = sqlite3_malloc64(sizeof(LcsIterator) * pCsr->nPhrase); + aIter = sqlite3Fts3MallocZero(sizeof(LcsIterator) * pCsr->nPhrase); if( !aIter ) return SQLITE_NOMEM; - memset(aIter, 0, sizeof(LcsIterator) * pCsr->nPhrase); (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); for(i=0; inPhrase; i++){ @@ -188706,7 +192295,7 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( if( rc!=SQLITE_OK ) goto offsets_out; /* Allocate the array of TermOffset iterators. */ - sCtx.aTerm = (TermOffset *)sqlite3_malloc64(sizeof(TermOffset)*nToken); + sCtx.aTerm = (TermOffset *)sqlite3Fts3MallocZero(sizeof(TermOffset)*nToken); if( 0==sCtx.aTerm ){ rc = SQLITE_NOMEM; goto offsets_out; @@ -188727,13 +192316,13 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( const char *zDoc; int nDoc; - /* Initialize the contents of sCtx.aTerm[] for column iCol. There is - ** no way that this operation can fail, so the return code from - ** fts3ExprIterate() can be discarded. + /* Initialize the contents of sCtx.aTerm[] for column iCol. This + ** operation may fail if the database contains corrupt records. */ sCtx.iCol = iCol; sCtx.iTerm = 0; - (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); + rc = fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); + if( rc!=SQLITE_OK ) goto offsets_out; /* Retreive the text stored in column iCol. If an SQL NULL is stored ** in column iCol, jump immediately to the next iteration of the loop. @@ -189632,7 +193221,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ #endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */ /************** End of fts3_unicode2.c ***************************************/ -/************** Begin file json1.c *******************************************/ +/************** Begin file json.c ********************************************/ /* ** 2015-08-12 ** @@ -189645,10 +193234,10 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ****************************************************************************** ** -** This SQLite extension implements JSON functions. The interface is -** modeled after MySQL JSON functions: +** This SQLite JSON functions. ** -** https://dev.mysql.com/doc/refman/5.7/en/json.html +** This file began as an extension in ext/misc/json1.c in 2015. That +** extension proved so useful that it has now been moved into the core. ** ** For the time being, all JSON is stored as pure text. (We might add ** a JSONB type in the future which stores a binary encoding of JSON in @@ -189656,48 +193245,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** This implementation parses JSON text at 250 MB/s, so it is hard to see ** how JSONB might improve on that.) 
*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1) -#if !defined(SQLITEINT_H) -/* #include "sqlite3ext.h" */ -#endif -SQLITE_EXTENSION_INIT1 -/* #include */ -/* #include */ -/* #include */ -/* #include */ - -/* Mark a function parameter as unused, to suppress nuisance compiler -** warnings. */ -#ifndef UNUSED_PARAM -# define UNUSED_PARAM(X) (void)(X) -#endif - -#ifndef LARGEST_INT64 -# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32)) -# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64) -#endif - -#ifndef deliberate_fall_through -# define deliberate_fall_through -#endif - -/* -** Versions of isspace(), isalnum() and isdigit() to which it is safe -** to pass signed char values. -*/ -#ifdef sqlite3Isdigit - /* Use the SQLite core versions if this routine is part of the - ** SQLite amalgamation */ -# define safe_isdigit(x) sqlite3Isdigit(x) -# define safe_isalnum(x) sqlite3Isalnum(x) -# define safe_isxdigit(x) sqlite3Isxdigit(x) -#else - /* Use the standard library for separate compilation */ -#include /* amalgamator: keep */ -# define safe_isdigit(x) isdigit((unsigned char)(x)) -# define safe_isalnum(x) isalnum((unsigned char)(x)) -# define safe_isxdigit(x) isxdigit((unsigned char)(x)) -#endif +#ifndef SQLITE_OMIT_JSON +/* #include "sqliteInt.h" */ /* ** Growing our own isspace() routine this way is twice as fast as @@ -189722,15 +193271,12 @@ static const char jsonIsSpace[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; -#define safe_isspace(x) (jsonIsSpace[(unsigned char)x]) +#define fast_isspace(x) (jsonIsSpace[(unsigned char)x]) -#ifndef SQLITE_AMALGAMATION - /* Unsigned integer types. These are already defined in the sqliteInt.h, - ** but the definitions need to be repeated for separate compilation. 
*/ - typedef sqlite3_uint64 u64; - typedef unsigned int u32; - typedef unsigned short int u16; - typedef unsigned char u8; +#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST) +# define VVA(X) +#else +# define VVA(X) X #endif /* Objects */ @@ -189789,13 +193335,14 @@ static const char * const jsonType[] = { struct JsonNode { u8 eType; /* One of the JSON_ type values */ u8 jnFlags; /* JNODE flags */ + u8 eU; /* Which union element to use */ u32 n; /* Bytes of content, or number of sub-nodes */ union { - const char *zJContent; /* Content for INT, REAL, and STRING */ - u32 iAppend; /* More terms for ARRAY and OBJECT */ - u32 iKey; /* Key for ARRAY objects in json_tree() */ - u32 iReplace; /* Replacement content for JNODE_REPLACE */ - JsonNode *pPatch; /* Node chain of patch for JNODE_PATCH */ + const char *zJContent; /* 1: Content for INT, REAL, and STRING */ + u32 iAppend; /* 2: More terms for ARRAY and OBJECT */ + u32 iKey; /* 3: Key for ARRAY objects in json_tree() */ + u32 iReplace; /* 4: Replacement content for JNODE_REPLACE */ + JsonNode *pPatch; /* 5: Node chain of patch for JNODE_PATCH */ } u; }; @@ -190073,11 +193620,14 @@ static void jsonRenderNode( JsonString *pOut, /* Write JSON here */ sqlite3_value **aReplace /* Replacement values */ ){ + assert( pNode!=0 ); if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){ - if( pNode->jnFlags & JNODE_REPLACE ){ + if( (pNode->jnFlags & JNODE_REPLACE)!=0 && ALWAYS(aReplace!=0) ){ + assert( pNode->eU==4 ); jsonAppendValue(pOut, aReplace[pNode->u.iReplace]); return; } + assert( pNode->eU==5 ); pNode = pNode->u.pPatch; } switch( pNode->eType ){ @@ -190096,6 +193646,7 @@ static void jsonRenderNode( } case JSON_STRING: { if( pNode->jnFlags & JNODE_RAW ){ + assert( pNode->eU==1 ); jsonAppendString(pOut, pNode->u.zJContent, pNode->n); break; } @@ -190103,6 +193654,7 @@ static void jsonRenderNode( } case JSON_REAL: case JSON_INT: { + assert( pNode->eU==1 ); jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); break; } @@ -190118,6 +193670,7 @@ static void jsonRenderNode( j += jsonNodeSize(&pNode[j]); } if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; + assert( pNode->eU==2 ); pNode = &pNode[pNode->u.iAppend]; j = 1; } @@ -190138,6 +193691,7 @@ static void jsonRenderNode( j += 1 + jsonNodeSize(&pNode[j+1]); } if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; + assert( pNode->eU==2 ); pNode = &pNode[pNode->u.iAppend]; j = 1; } @@ -190182,10 +193736,10 @@ static u8 jsonHexToInt(int h){ */ static u32 jsonHexToInt4(const char *z){ u32 v; - assert( safe_isxdigit(z[0]) ); - assert( safe_isxdigit(z[1]) ); - assert( safe_isxdigit(z[2]) ); - assert( safe_isxdigit(z[3]) ); + assert( sqlite3Isxdigit(z[0]) ); + assert( sqlite3Isxdigit(z[1]) ); + assert( sqlite3Isxdigit(z[2]) ); + assert( sqlite3Isxdigit(z[3]) ); v = (jsonHexToInt(z[0])<<12) + (jsonHexToInt(z[1])<<8) + (jsonHexToInt(z[2])<<4) @@ -190217,7 +193771,9 @@ static void jsonReturn( } case JSON_INT: { sqlite3_int64 i = 0; - const char *z = pNode->u.zJContent; + const char *z; + assert( pNode->eU==1 ); + z = pNode->u.zJContent; if( z[0]=='-' ){ z++; } while( z[0]>='0' && z[0]<='9' ){ unsigned v = *(z++) - '0'; @@ -190240,14 +193796,17 @@ static void jsonReturn( sqlite3_result_int64(pCtx, i); int_done: break; - int_as_real: i=0; /* no break */ deliberate_fall_through + int_as_real: ; /* no break */ deliberate_fall_through } case JSON_REAL: { double r; #ifdef SQLITE_AMALGAMATION - const char *z = pNode->u.zJContent; + const char *z; + assert( pNode->eU==1 ); + z = pNode->u.zJContent; sqlite3AtoF(z, &r, 
sqlite3Strlen30(z), SQLITE_UTF8); #else + assert( pNode->eU==1 ); r = strtod(pNode->u.zJContent, 0); #endif sqlite3_result_double(pCtx, r); @@ -190258,6 +193817,7 @@ static void jsonReturn( ** json_insert() and json_replace() and those routines do not ** call jsonReturn() */ if( pNode->jnFlags & JNODE_RAW ){ + assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n, SQLITE_TRANSIENT); }else @@ -190265,15 +193825,18 @@ static void jsonReturn( assert( (pNode->jnFlags & JNODE_RAW)==0 ); if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ /* JSON formatted without any backslash-escapes */ + assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2, SQLITE_TRANSIENT); }else{ /* Translate JSON formatted string into raw text */ u32 i; u32 n = pNode->n; - const char *z = pNode->u.zJContent; + const char *z; char *zOut; u32 j; + assert( pNode->eU==1 ); + z = pNode->u.zJContent; zOut = sqlite3_malloc( n+1 ); if( zOut==0 ){ sqlite3_result_error_nomem(pCtx); @@ -190394,12 +193957,13 @@ static int jsonParseAddNode( const char *zContent /* Content */ ){ JsonNode *p; - if( pParse->nNode>=pParse->nAlloc ){ + if( pParse->aNode==0 || pParse->nNode>=pParse->nAlloc ){ return jsonParseAddNodeExpand(pParse, eType, n, zContent); } p = &pParse->aNode[pParse->nNode]; p->eType = (u8)eType; p->jnFlags = 0; + VVA( p->eU = zContent ? 1 : 0 ); p->n = n; p->u.zJContent = zContent; return pParse->nNode++; @@ -190410,7 +193974,7 @@ static int jsonParseAddNode( */ static int jsonIs4Hex(const char *z){ int i; - for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0; + for(i=0; i<4; i++) if( !sqlite3Isxdigit(z[i]) ) return 0; return 1; } @@ -190429,13 +193993,13 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ int x; JsonNode *pNode; const char *z = pParse->zJson; - while( safe_isspace(z[i]) ){ i++; } + while( fast_isspace(z[i]) ){ i++; } if( (c = z[i])=='{' ){ /* Parse object */ iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); if( iThis<0 ) return -1; for(j=i+1;;j++){ - while( safe_isspace(z[j]) ){ j++; } + while( fast_isspace(z[j]) ){ j++; } if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; x = jsonParseValue(pParse, j); if( x<0 ){ @@ -190448,14 +194012,14 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( pNode->eType!=JSON_STRING ) return -1; pNode->jnFlags |= JNODE_LABEL; j = x; - while( safe_isspace(z[j]) ){ j++; } + while( fast_isspace(z[j]) ){ j++; } if( z[j]!=':' ) return -1; j++; x = jsonParseValue(pParse, j); pParse->iDepth--; if( x<0 ) return -1; j = x; - while( safe_isspace(z[j]) ){ j++; } + while( fast_isspace(z[j]) ){ j++; } c = z[j]; if( c==',' ) continue; if( c!='}' ) return -1; @@ -190467,8 +194031,9 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ /* Parse array */ iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); if( iThis<0 ) return -1; + memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); for(j=i+1;;j++){ - while( safe_isspace(z[j]) ){ j++; } + while( fast_isspace(z[j]) ){ j++; } if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; x = jsonParseValue(pParse, j); pParse->iDepth--; @@ -190477,7 +194042,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ return -1; } j = x; - while( safe_isspace(z[j]) ){ j++; } + while( fast_isspace(z[j]) ){ j++; } c = z[j]; if( c==',' ) continue; if( c!=']' ) return -1; @@ -190514,17 +194079,17 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ return j+1; }else if( c=='n' && strncmp(z+i,"null",4)==0 - && !safe_isalnum(z[i+4]) ){ + && !sqlite3Isalnum(z[i+4]) ){ 
jsonParseAddNode(pParse, JSON_NULL, 0, 0); return i+4; }else if( c=='t' && strncmp(z+i,"true",4)==0 - && !safe_isalnum(z[i+4]) ){ + && !sqlite3Isalnum(z[i+4]) ){ jsonParseAddNode(pParse, JSON_TRUE, 0, 0); return i+4; }else if( c=='f' && strncmp(z+i,"false",5)==0 - && !safe_isalnum(z[i+5]) ){ + && !sqlite3Isalnum(z[i+5]) ){ jsonParseAddNode(pParse, JSON_FALSE, 0, 0); return i+5; }else if( c=='-' || (c>='0' && c<='9') ){ @@ -190595,7 +194160,7 @@ static int jsonParse( if( pParse->oom ) i = -1; if( i>0 ){ assert( pParse->iDepth==0 ); - while( safe_isspace(zJson[i]) ) i++; + while( fast_isspace(zJson[i]) ) i++; if( zJson[i] ) i = -1; } if( i<=0 ){ @@ -190731,6 +194296,7 @@ static JsonParse *jsonParseCached( ** a match. */ static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){ + assert( pNode->eU==1 ); if( pNode->jnFlags & JNODE_RAW ){ if( pNode->n!=nKey ) return 0; return strncmp(pNode->u.zJContent, zKey, nKey)==0; @@ -190777,14 +194343,15 @@ static JsonNode *jsonLookupStep( *pzErr = zPath; return 0; } + testcase( nKey==0 ); }else{ zKey = zPath; for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){} nKey = i; - } - if( nKey==0 ){ - *pzErr = zPath; - return 0; + if( nKey==0 ){ + *pzErr = zPath; + return 0; + } } j = 1; for(;;){ @@ -190796,6 +194363,7 @@ static JsonNode *jsonLookupStep( j += jsonNodeSize(&pRoot[j]); } if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; + assert( pRoot->eU==2 ); iRoot += pRoot->u.iAppend; pRoot = &pParse->aNode[iRoot]; j = 1; @@ -190810,8 +194378,10 @@ static JsonNode *jsonLookupStep( if( pParse->oom ) return 0; if( pNode ){ pRoot = &pParse->aNode[iRoot]; + assert( pRoot->eU==0 ); pRoot->u.iAppend = iStart - iRoot; pRoot->jnFlags |= JNODE_APPEND; + VVA( pRoot->eU = 2 ); pParse->aNode[iLabel].jnFlags |= JNODE_RAW; } return pNode; @@ -190819,7 +194389,7 @@ static JsonNode *jsonLookupStep( }else if( zPath[0]=='[' ){ i = 0; j = 1; - while( safe_isdigit(zPath[j]) ){ + while( sqlite3Isdigit(zPath[j]) ){ i = i*10 + zPath[j] - '0'; j++; } @@ -190834,18 +194404,19 @@ static JsonNode *jsonLookupStep( j += jsonNodeSize(&pBase[j]); } if( (pBase->jnFlags & JNODE_APPEND)==0 ) break; + assert( pBase->eU==2 ); iBase += pBase->u.iAppend; pBase = &pParse->aNode[iBase]; j = 1; } j = 2; - if( zPath[2]=='-' && safe_isdigit(zPath[3]) ){ + if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){ unsigned int x = 0; j = 3; do{ x = x*10 + zPath[j] - '0'; j++; - }while( safe_isdigit(zPath[j]) ); + }while( sqlite3Isdigit(zPath[j]) ); if( x>i ) return 0; i -= x; } @@ -190867,6 +194438,7 @@ static JsonNode *jsonLookupStep( j += jsonNodeSize(&pRoot[j]); } if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; + assert( pRoot->eU==2 ); iRoot += pRoot->u.iAppend; pRoot = &pParse->aNode[iRoot]; j = 1; @@ -190882,8 +194454,10 @@ static JsonNode *jsonLookupStep( if( pParse->oom ) return 0; if( pNode ){ pRoot = &pParse->aNode[iRoot]; + assert( pRoot->eU==0 ); pRoot->u.iAppend = iStart - iRoot; pRoot->jnFlags |= JNODE_APPEND; + VVA( pRoot->eU = 2 ); } return pNode; } @@ -191037,9 +194611,13 @@ static void jsonParseFunc( } jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d", i, zType, x.aNode[i].n, x.aUp[i]); + assert( x.aNode[i].eU==0 || x.aNode[i].eU==1 ); if( x.aNode[i].u.zJContent!=0 ){ + assert( x.aNode[i].eU==1 ); jsonAppendRaw(&s, " ", 1); jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n); + }else{ + assert( x.aNode[i].eU==0 ); } jsonAppendRaw(&s, "\n", 1); } @@ -191057,7 +194635,7 @@ static void jsonTest1Func( int argc, sqlite3_value **argv ){ - UNUSED_PARAM(argc); + 
UNUSED_PARAMETER(argc); sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE); } #endif /* SQLITE_DEBUG */ @@ -191078,7 +194656,7 @@ static void jsonQuoteFunc( sqlite3_value **argv ){ JsonString jx; - UNUSED_PARAM(argc); + UNUSED_PARAMETER(argc); jsonInit(&jx, ctx); jsonAppendValue(&jx, argv[0]); @@ -191149,13 +194727,34 @@ static void jsonArrayLengthFunc( sqlite3_result_int64(ctx, n); } +/* +** Bit values for the flags passed into jsonExtractFunc() or +** jsonSetFunc() via the user-data value. +*/ +#define JSON_JSON 0x01 /* Result is always JSON */ +#define JSON_SQL 0x02 /* Result is always SQL */ +#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ +#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ + /* ** json_extract(JSON, PATH, ...) +** "->"(JSON,PATH) +** "->>"(JSON,PATH) +** +** Return the element described by PATH. Return NULL if that PATH element +** is not found. +** +** If JSON_JSON is set or if more that one PATH argument is supplied then +** always return a JSON representation of the result. If JSON_SQL is set, +** then always return an SQL representation of the result. If neither flag +** is present and argc==2, then return JSON for objects and arrays and SQL +** for all other values. +** +** When multiple PATH arguments are supplied, the result is a JSON array +** containing the result of each PATH. ** -** Return the element described by PATH. Return NULL if there is no -** PATH element. If there are multiple PATHs, then return a JSON array -** with the result from each path. Throw an error if the JSON or any PATH -** is malformed. +** Abbreviated JSON path expressions are allows if JSON_ABPATH, for +** compatibility with PG. */ static void jsonExtractFunc( sqlite3_context *ctx, @@ -191165,35 +194764,77 @@ static void jsonExtractFunc( JsonParse *p; /* The parse */ JsonNode *pNode; const char *zPath; + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); JsonString jx; - int i; if( argc<2 ) return; p = jsonParseCached(ctx, argv, ctx); if( p==0 ) return; - jsonInit(&jx, ctx); - jsonAppendChar(&jx, '['); - for(i=1; inErr ) break; - if( argc>2 ){ + if( argc==2 ){ + /* With a single PATH argument */ + zPath = (const char*)sqlite3_value_text(argv[1]); + if( zPath==0 ) return; + if( flags & JSON_ABPATH ){ + if( zPath[0]!='$' ){ + /* The -> and ->> operators accept abbreviated PATH arguments. This + ** is mostly for compatibility with PostgreSQL, but also for + ** convenience. + ** + ** NUMBER ==> $[NUMBER] // PG compatible + ** LABEL ==> $.LABEL // PG compatible + ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + */ + jsonInit(&jx, ctx); + if( sqlite3Isdigit(zPath[0]) ){ + jsonAppendRaw(&jx, "$[", 2); + jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); + jsonAppendRaw(&jx, "]", 2); + }else{ + jsonAppendRaw(&jx, "$.", 1 + (zPath[0]!='[')); + jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); + jsonAppendChar(&jx, 0); + } + pNode = jx.bErr ? 
0 : jsonLookup(p, jx.zBuf, 0, ctx); + jsonReset(&jx); + }else{ + pNode = jsonLookup(p, zPath, 0, ctx); + } + if( pNode ){ + if( flags & JSON_JSON ){ + jsonReturnJson(pNode, ctx, 0); + }else{ + jsonReturn(pNode, ctx, 0); + sqlite3_result_subtype(ctx, 0); + } + } + }else{ + pNode = jsonLookup(p, zPath, 0, ctx); + if( p->nErr==0 && pNode ) jsonReturn(pNode, ctx, 0); + } + }else{ + /* Two or more PATH arguments results in a JSON array with each + ** element of the array being the value selected by one of the PATHs */ + int i; + jsonInit(&jx, ctx); + jsonAppendChar(&jx, '['); + for(i=1; inErr ) break; jsonAppendSeparator(&jx); if( pNode ){ jsonRenderNode(pNode, &jx, 0); }else{ jsonAppendRaw(&jx, "null", 4); } - }else if( pNode ){ - jsonReturn(pNode, ctx, 0); } + if( i==argc ){ + jsonAppendChar(&jx, ']'); + jsonResult(&jx); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } + jsonReset(&jx); } - if( argc>2 && i==argc ){ - jsonAppendChar(&jx, ']'); - jsonResult(&jx); - sqlite3_result_subtype(ctx, JSON_SUBTYPE); - } - jsonReset(&jx); } /* This is the RFC 7396 MergePatch algorithm. @@ -191222,6 +194863,7 @@ static JsonNode *jsonMergePatch( const char *zKey; assert( pPatch[i].eType==JSON_STRING ); assert( pPatch[i].jnFlags & JNODE_LABEL ); + assert( pPatch[i].eU==1 ); nKey = pPatch[i].n; zKey = pPatch[i].u.zJContent; assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); @@ -191238,6 +194880,12 @@ static JsonNode *jsonMergePatch( if( pNew==0 ) return 0; pTarget = &pParse->aNode[iTarget]; if( pNew!=&pTarget[j+1] ){ + assert( pTarget[j+1].eU==0 + || pTarget[j+1].eU==1 + || pTarget[j+1].eU==2 ); + testcase( pTarget[j+1].eU==1 ); + testcase( pTarget[j+1].eU==2 ); + VVA( pTarget[j+1].eU = 5 ); pTarget[j+1].u.pPatch = pNew; pTarget[j+1].jnFlags |= JNODE_PATCH; } @@ -191253,9 +194901,14 @@ static JsonNode *jsonMergePatch( if( pParse->oom ) return 0; jsonRemoveAllNulls(pPatch); pTarget = &pParse->aNode[iTarget]; + assert( pParse->aNode[iRoot].eU==0 || pParse->aNode[iRoot].eU==2 ); + testcase( pParse->aNode[iRoot].eU==2 ); pParse->aNode[iRoot].jnFlags |= JNODE_APPEND; + VVA( pParse->aNode[iRoot].eU = 2 ); pParse->aNode[iRoot].u.iAppend = iStart - iRoot; iRoot = iStart; + assert( pParse->aNode[iPatch].eU==0 ); + VVA( pParse->aNode[iPatch].eU = 5 ); pParse->aNode[iPatch].jnFlags |= JNODE_PATCH; pParse->aNode[iPatch].u.pPatch = &pPatch[i+1]; } @@ -191277,7 +194930,7 @@ static void jsonPatchFunc( JsonParse y; /* The patch */ JsonNode *pResult; /* The result of the merge */ - UNUSED_PARAM(argc); + UNUSED_PARAMETER(argc); if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){ jsonParseReset(&x); @@ -191397,11 +195050,15 @@ static void jsonReplaceFunc( pNode = jsonLookup(&x, zPath, 0, ctx); if( x.nErr ) goto replace_err; if( pNode ){ + assert( pNode->eU==0 || pNode->eU==1 || pNode->eU==4 ); + testcase( pNode->eU!=0 && pNode->eU!=1 ); pNode->jnFlags |= (u8)JNODE_REPLACE; + VVA( pNode->eU = 4 ); pNode->u.iReplace = i + 1; } } if( x.aNode[0].jnFlags & JNODE_REPLACE ){ + assert( x.aNode[0].eU==4 ); sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); }else{ jsonReturnJson(x.aNode, ctx, argv); @@ -191410,6 +195067,7 @@ static void jsonReplaceFunc( jsonParseReset(&x); } + /* ** json_set(JSON, PATH, VALUE, ...) 
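/*
** Illustrative sketch, not part of the diff above: how the "->" and "->>"
** operators wired into jsonExtractFunc() behave, including the abbreviated
** (PostgreSQL-style) path arguments handled under JSON_ABPATH.  Assumes
** this amalgamation (or any SQLite 3.38+) is linked in; the JSON literals
** below are made up for illustration only.
*/
#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *pUnused, int nCol, char **azVal, char **azCol){
  int i;
  (void)pUnused;
  for(i=0; i<nCol; i++){
    printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  return 0;
}

static int demo_json_operators(void){
  sqlite3 *db = 0;
  char *zErr = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return rc;
  /* json_extract() keeps its historical behaviour; "->" always returns a
  ** JSON representation (JSON_JSON flag); "->>" always returns an SQL value
  ** (JSON_SQL flag).  A bare label or array index on the right-hand side is
  ** rewritten to $.label or $[index] before the lookup. */
  rc = sqlite3_exec(db,
    "SELECT json_extract('{\"a\":{\"b\":123}}', '$.a.b') AS extracted,"  /* 123       */
    "       '{\"a\":{\"b\":123}}' -> 'a'                 AS json_sub,"   /* {"b":123} */
    "       '{\"a\":{\"b\":123}}' ->> '$.a.b'            AS sql_val,"    /* 123       */
    "       '[11,22,33]' -> 1                            AS elem;",      /* 22        */
    print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return rc;
}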
** @@ -191432,7 +195090,7 @@ static void jsonSetFunc( const char *zPath; u32 i; int bApnd; - int bIsSet = *(int*)sqlite3_user_data(ctx); + int bIsSet = sqlite3_user_data(ctx)!=0; if( argc<1 ) return; if( (argc&1)==0 ) { @@ -191451,11 +195109,15 @@ static void jsonSetFunc( }else if( x.nErr ){ goto jsonSetDone; }else if( pNode && (bApnd || bIsSet) ){ + testcase( pNode->eU!=0 && pNode->eU!=1 ); + assert( pNode->eU!=3 && pNode->eU!=5 ); + VVA( pNode->eU = 4 ); pNode->jnFlags |= (u8)JNODE_REPLACE; pNode->u.iReplace = i + 1; } } if( x.aNode[0].jnFlags & JNODE_REPLACE ){ + assert( x.aNode[0].eU==4 ); sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); }else{ jsonReturnJson(x.aNode, ctx, argv); @@ -191468,8 +195130,8 @@ static void jsonSetFunc( ** json_type(JSON) ** json_type(JSON, PATH) ** -** Return the top-level "type" of a JSON string. Throw an error if -** either the JSON or PATH inputs are not well-formed. +** Return the top-level "type" of a JSON string. json_type() raises an +** error if either the JSON or PATH inputs are not well-formed. */ static void jsonTypeFunc( sqlite3_context *ctx, @@ -191505,7 +195167,7 @@ static void jsonValidFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - UNUSED_PARAM(argc); + UNUSED_PARAMETER(argc); p = jsonParseCached(ctx, argv, 0); sqlite3_result_int(ctx, p!=0); } @@ -191525,7 +195187,7 @@ static void jsonArrayStep( sqlite3_value **argv ){ JsonString *pStr; - UNUSED_PARAM(argc); + UNUSED_PARAMETER(argc); pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ @@ -191585,8 +195247,8 @@ static void jsonGroupInverse( char *z; char c; JsonString *pStr; - UNUSED_PARAM(argc); - UNUSED_PARAM(argv); + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); #ifdef NEVER /* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will @@ -191630,7 +195292,7 @@ static void jsonObjectStep( JsonString *pStr; const char *z; u32 n; - UNUSED_PARAM(argc); + UNUSED_PARAMETER(argc); pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ @@ -191721,10 +195383,10 @@ static int jsonEachConnect( #define JEACH_JSON 8 #define JEACH_ROOT 9 - UNUSED_PARAM(pzErr); - UNUSED_PARAM(argv); - UNUSED_PARAM(argc); - UNUSED_PARAM(pAux); + UNUSED_PARAMETER(pzErr); + UNUSED_PARAMETER(argv); + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(pAux); rc = sqlite3_declare_vtab(db, "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path," "json HIDDEN,root HIDDEN)"); @@ -191747,7 +195409,7 @@ static int jsonEachDisconnect(sqlite3_vtab *pVtab){ static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ JsonEachCursor *pCur; - UNUSED_PARAM(p); + UNUSED_PARAMETER(p); pCur = sqlite3_malloc( sizeof(*pCur) ); if( pCur==0 ) return SQLITE_NOMEM; memset(pCur, 0, sizeof(*pCur)); @@ -191806,6 +195468,9 @@ static int jsonEachNext(sqlite3_vtab_cursor *cur){ JsonNode *pUp = &p->sParse.aNode[iUp]; p->eType = pUp->eType; if( pUp->eType==JSON_ARRAY ){ + assert( pUp->eU==0 || pUp->eU==3 ); + testcase( pUp->eU==3 ); + VVA( pUp->eU = 3 ); if( iUp==p->i-1 ){ pUp->u.iKey = 0; }else{ @@ -191834,6 +195499,33 @@ static int jsonEachNext(sqlite3_vtab_cursor *cur){ return SQLITE_OK; } +/* Append an object label to the JSON Path being constructed +** in pStr. 
+*/ +static void jsonAppendObjectPathElement( + JsonString *pStr, + JsonNode *pNode +){ + int jj, nn; + const char *z; + assert( pNode->eType==JSON_STRING ); + assert( pNode->jnFlags & JNODE_LABEL ); + assert( pNode->eU==1 ); + z = pNode->u.zJContent; + nn = pNode->n; + assert( nn>=2 ); + assert( z[0]=='"' ); + assert( z[nn-1]=='"' ); + if( nn>2 && sqlite3Isalpha(z[1]) ){ + for(jj=2; jjsParse.aNode[i]; pUp = &p->sParse.aNode[iUp]; if( pUp->eType==JSON_ARRAY ){ + assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) ); + testcase( pUp->eU==0 ); jsonPrintf(30, pStr, "[%d]", pUp->u.iKey); }else{ assert( pUp->eType==JSON_OBJECT ); if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--; - assert( pNode->eType==JSON_STRING ); - assert( pNode->jnFlags & JNODE_LABEL ); - jsonPrintf(pNode->n+1, pStr, ".%.*s", pNode->n-2, pNode->u.zJContent+1); + jsonAppendObjectPathElement(pStr, pNode); } } @@ -191879,6 +195571,7 @@ static int jsonEachColumn( u32 iKey; if( p->bRecursive ){ if( p->iRowid==0 ) break; + assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 ); iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey; }else{ iKey = p->iRowid; @@ -191928,7 +195621,7 @@ static int jsonEachColumn( if( p->eType==JSON_ARRAY ){ jsonPrintf(30, &x, "[%d]", p->iRowid); }else if( p->eType==JSON_OBJECT ){ - jsonPrintf(pThis->n, &x, ".%.*s", pThis->n-2, pThis->u.zJContent+1); + jsonAppendObjectPathElement(&x, pThis); } } jsonResult(&x); @@ -191986,7 +195679,7 @@ static int jsonEachBestIndex( /* This implementation assumes that JSON and ROOT are the last two ** columns in the table */ assert( JEACH_ROOT == JEACH_JSON+1 ); - UNUSED_PARAM(tab); + UNUSED_PARAMETER(tab); aIdx[0] = aIdx[1] = -1; pConstraint = pIdxInfo->aConstraint; for(i=0; inConstraint; i++, pConstraint++){ @@ -191995,6 +195688,7 @@ static int jsonEachBestIndex( if( pConstraint->iColumn < JEACH_JSON ) continue; iCol = pConstraint->iColumn - JEACH_JSON; assert( iCol==0 || iCol==1 ); + testcase( iCol==0 ); iMask = 1 << iCol; if( pConstraint->usable==0 ){ unusableMask |= iMask; @@ -192041,8 +195735,8 @@ static int jsonEachFilter( const char *zRoot = 0; sqlite3_int64 n; - UNUSED_PARAM(idxStr); - UNUSED_PARAM(argc); + UNUSED_PARAMETER(idxStr); + UNUSED_PARAMETER(argc); jsonEachCursorReset(p); if( idxNum==0 ) return SQLITE_OK; z = (const char*)sqlite3_value_text(argv[0]); @@ -192092,6 +195786,8 @@ static int jsonEachFilter( p->iBegin = p->i = (int)(pNode - p->sParse.aNode); p->eType = pNode->eType; if( p->eType>=JSON_ARRAY ){ + assert( pNode->eU==0 ); + VVA( pNode->eU = 3 ); pNode->u.iKey = 0; p->iEnd = p->i + pNode->n + 1; if( p->bRecursive ){ @@ -192165,108 +195861,68 @@ static sqlite3_module jsonTreeModule = { 0 /* xShadowName */ }; #endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/**************************************************************************** -** The following routines are the only publically visible identifiers in this -** file. Call the following routines in order to register the various SQL -** functions and the virtual table implemented by this file. 
-****************************************************************************/ - -SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){ - int rc = SQLITE_OK; - unsigned int i; - static const struct { - const char *zName; - int nArg; - int flag; - void (*xFunc)(sqlite3_context*,int,sqlite3_value**); - } aFunc[] = { - { "json", 1, 0, jsonRemoveFunc }, - { "json_array", -1, 0, jsonArrayFunc }, - { "json_array_length", 1, 0, jsonArrayLengthFunc }, - { "json_array_length", 2, 0, jsonArrayLengthFunc }, - { "json_extract", -1, 0, jsonExtractFunc }, - { "json_insert", -1, 0, jsonSetFunc }, - { "json_object", -1, 0, jsonObjectFunc }, - { "json_patch", 2, 0, jsonPatchFunc }, - { "json_quote", 1, 0, jsonQuoteFunc }, - { "json_remove", -1, 0, jsonRemoveFunc }, - { "json_replace", -1, 0, jsonReplaceFunc }, - { "json_set", -1, 1, jsonSetFunc }, - { "json_type", 1, 0, jsonTypeFunc }, - { "json_type", 2, 0, jsonTypeFunc }, - { "json_valid", 1, 0, jsonValidFunc }, - +#endif /* !defined(SQLITE_OMIT_JSON) */ + +/* +** Register JSON functions. +*/ +SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ +#ifndef SQLITE_OMIT_JSON + static FuncDef aJsonFunc[] = { + JFUNCTION(json, 1, 0, jsonRemoveFunc), + JFUNCTION(json_array, -1, 0, jsonArrayFunc), + JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc), + JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc), + JFUNCTION(json_extract, -1, 0, jsonExtractFunc), + JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc), + JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), + JFUNCTION(json_insert, -1, 0, jsonSetFunc), + JFUNCTION(json_object, -1, 0, jsonObjectFunc), + JFUNCTION(json_patch, 2, 0, jsonPatchFunc), + JFUNCTION(json_quote, 1, 0, jsonQuoteFunc), + JFUNCTION(json_remove, -1, 0, jsonRemoveFunc), + JFUNCTION(json_replace, -1, 0, jsonReplaceFunc), + JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc), + JFUNCTION(json_type, 1, 0, jsonTypeFunc), + JFUNCTION(json_type, 2, 0, jsonTypeFunc), + JFUNCTION(json_valid, 1, 0, jsonValidFunc), #if SQLITE_DEBUG - /* DEBUG and TESTING functions */ - { "json_parse", 1, 0, jsonParseFunc }, - { "json_test1", 1, 0, jsonTest1Func }, -#endif + JFUNCTION(json_parse, 1, 0, jsonParseFunc), + JFUNCTION(json_test1, 1, 0, jsonTest1Func), +#endif + WAGGREGATE(json_group_array, 1, 0, 0, + jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS), + WAGGREGATE(json_group_object, 2, 0, 0, + jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS) }; + sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc)); +#endif +} + +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) +/* +** Register the JSON table-valued functions +*/ +SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3 *db){ + int rc = SQLITE_OK; static const struct { - const char *zName; - int nArg; - void (*xStep)(sqlite3_context*,int,sqlite3_value**); - void (*xFinal)(sqlite3_context*); - void (*xValue)(sqlite3_context*); - } aAgg[] = { - { "json_group_array", 1, - jsonArrayStep, jsonArrayFinal, jsonArrayValue }, - { "json_group_object", 2, - jsonObjectStep, jsonObjectFinal, jsonObjectValue }, - }; -#ifndef SQLITE_OMIT_VIRTUALTABLE - static const struct { - const char *zName; - sqlite3_module *pModule; + const char *zName; + sqlite3_module *pModule; } aMod[] = { { "json_each", &jsonEachModule }, { "json_tree", &jsonTreeModule }, }; -#endif - static const int enc = - SQLITE_UTF8 | - SQLITE_DETERMINISTIC | - 
SQLITE_INNOCUOUS; - for(i=0; i */ /* #include */ @@ -192405,7 +196078,9 @@ struct Rtree { u8 nBytesPerCell; /* Bytes consumed per cell */ u8 inWrTrans; /* True if inside write transaction */ u8 nAux; /* # of auxiliary columns in %_rowid */ +#ifdef SQLITE_ENABLE_GEOPOLY u8 nAuxNotNull; /* Number of initial not-null aux columns */ +#endif #ifdef SQLITE_DEBUG u8 bCorrupt; /* Shadow table corruption detected */ #endif @@ -192687,7 +196362,12 @@ struct RtreeMatchArg { ** it is not, make it a no-op. */ #ifndef SQLITE_AMALGAMATION -# define testcase(X) +# if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) + unsigned int sqlite3RtreeTestcase = 0; +# define testcase(X) if( X ){ sqlite3RtreeTestcase += __LINE__; } +# else +# define testcase(X) +# endif #endif /* @@ -192936,18 +196616,6 @@ static void nodeBlobReset(Rtree *pRtree){ } } -/* -** Check to see if pNode is the same as pParent or any of the parents -** of pParent. -*/ -static int nodeInParentChain(const RtreeNode *pNode, const RtreeNode *pParent){ - do{ - if( pNode==pParent ) return 1; - pParent = pParent->pParent; - }while( pParent ); - return 0; -} - /* ** Obtain a reference to an r-tree node. */ @@ -192964,14 +196632,7 @@ static int nodeAcquire( ** increase its reference count and return it. */ if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){ - if( pParent && !pNode->pParent ){ - if( nodeInParentChain(pNode, pParent) ){ - RTREE_IS_CORRUPT(pRtree); - return SQLITE_CORRUPT_VTAB; - } - pParent->nRef++; - pNode->pParent = pParent; - }else if( pParent && pNode->pParent && pParent!=pNode->pParent ){ + if( pParent && pParent!=pNode->pParent ){ RTREE_IS_CORRUPT(pRtree); return SQLITE_CORRUPT_VTAB; } @@ -193029,7 +196690,7 @@ static int nodeAcquire( ** are the leaves, and so on. If the depth as specified on the root node ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt. 
*/ - if( pNode && rc==SQLITE_OK && iNode==1 ){ + if( rc==SQLITE_OK && pNode && iNode==1 ){ pRtree->iDepth = readInt16(pNode->zData); if( pRtree->iDepth>RTREE_MAX_DEPTH ){ rc = SQLITE_CORRUPT_VTAB; @@ -193552,20 +197213,29 @@ static void rtreeNonleafConstraint( switch( p->op ){ case RTREE_TRUE: return; /* Always satisfied */ case RTREE_FALSE: break; /* Never satisfied */ + case RTREE_EQ: + RTREE_DECODE_COORD(eInt, pCellData, val); + /* val now holds the lower bound of the coordinate pair */ + if( p->u.rValue>=val ){ + pCellData += 4; + RTREE_DECODE_COORD(eInt, pCellData, val); + /* val now holds the upper bound of the coordinate pair */ + if( p->u.rValue<=val ) return; + } + break; case RTREE_LE: case RTREE_LT: - case RTREE_EQ: RTREE_DECODE_COORD(eInt, pCellData, val); /* val now holds the lower bound of the coordinate pair */ if( p->u.rValue>=val ) return; - if( p->op!=RTREE_EQ ) break; /* RTREE_LE and RTREE_LT end here */ - /* Fall through for the RTREE_EQ case */ + break; - default: /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */ + default: pCellData += 4; RTREE_DECODE_COORD(eInt, pCellData, val); /* val now holds the upper bound of the coordinate pair */ if( p->u.rValue<=val ) return; + break; } *peWithin = NOT_WITHIN; } @@ -193635,11 +197305,12 @@ static int nodeRowidIndex( */ static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode, int *piIndex){ RtreeNode *pParent = pNode->pParent; - if( pParent ){ + if( ALWAYS(pParent) ){ return nodeRowidIndex(pRtree, pParent, pNode->iNode, piIndex); + }else{ + *piIndex = -1; + return SQLITE_OK; } - *piIndex = -1; - return SQLITE_OK; } /* @@ -193762,7 +197433,8 @@ static RtreeSearchPoint *rtreeSearchPointNew( pNew = rtreeEnqueue(pCur, rScore, iLevel); if( pNew==0 ) return 0; ii = (int)(pNew - pCur->aPoint) + 1; - if( iiaNode[ii]==0 ); pCur->aNode[ii] = pCur->aNode[0]; }else{ @@ -193823,7 +197495,7 @@ static void rtreeSearchPointPop(RtreeCursor *p){ if( p->bPoint ){ p->anQueue[p->sPoint.iLevel]--; p->bPoint = 0; - }else if( p->nPoint ){ + }else if( ALWAYS(p->nPoint) ){ p->anQueue[p->aPoint[0].iLevel]--; n = --p->nPoint; p->aPoint[0] = p->aPoint[n]; @@ -193964,7 +197636,7 @@ static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr); int rc = SQLITE_OK; RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); - if( rc==SQLITE_OK && p ){ + if( rc==SQLITE_OK && ALWAYS(p) ){ *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); } return rc; @@ -193982,7 +197654,7 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); if( rc ) return rc; - if( p==0 ) return SQLITE_OK; + if( NEVER(p==0) ) return SQLITE_OK; if( i==0 ){ sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell)); }else if( i<=pRtree->nDim2 ){ @@ -194181,8 +197853,11 @@ static int rtreeFilter( } if( rc==SQLITE_OK ){ RtreeSearchPoint *pNew; + assert( pCsr->bPoint==0 ); /* Due to the resetCursor() call above */ pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1)); - if( pNew==0 ) return SQLITE_NOMEM; + if( NEVER(pNew==0) ){ /* Because pCsr->bPoint was FALSE */ + return SQLITE_NOMEM; + } pNew->id = 1; pNew->iCell = 0; pNew->eWithin = PARTLY_WITHIN; @@ -194259,7 +197934,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii]; if( bMatch==0 && p->usable - && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ + && 
p->iColumn<=0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ /* We have an equality constraint on the rowid. Use strategy 1. */ int jj; @@ -194465,7 +198140,7 @@ static int ChooseLeaf( int nCell = NCELL(pNode); RtreeCell cell; - RtreeNode *pChild; + RtreeNode *pChild = 0; RtreeCell *aCell = 0; @@ -194512,12 +198187,19 @@ static int AdjustTree( ){ RtreeNode *p = pNode; int cnt = 0; + int rc; while( p->pParent ){ RtreeNode *pParent = p->pParent; RtreeCell cell; int iCell; - if( (++cnt)>1000 || nodeParentIndex(pRtree, p, &iCell) ){ + cnt++; + if( NEVER(cnt>100) ){ + RTREE_IS_CORRUPT(pRtree); + return SQLITE_CORRUPT_VTAB; + } + rc = nodeParentIndex(pRtree, p, &iCell); + if( NEVER(rc!=SQLITE_OK) ){ RTREE_IS_CORRUPT(pRtree); return SQLITE_CORRUPT_VTAB; } @@ -194806,12 +198488,17 @@ static int updateMapping( xSetMapping = ((iHeight==0)?rowidWrite:parentWrite); if( iHeight>0 ){ RtreeNode *pChild = nodeHashLookup(pRtree, iRowid); + RtreeNode *p; + for(p=pNode; p; p=p->pParent){ + if( p==pChild ) return SQLITE_CORRUPT_VTAB; + } if( pChild ){ nodeRelease(pRtree, pChild->pParent); nodeReference(pNode); pChild->pParent = pNode; } } + if( NEVER(pNode==0) ) return SQLITE_ERROR; return xSetMapping(pRtree, iRowid, pNode->iNode); } @@ -194901,11 +198588,12 @@ static int SplitNode( RtreeNode *pParent = pLeft->pParent; int iCell; rc = nodeParentIndex(pRtree, pLeft, &iCell); - if( rc==SQLITE_OK ){ + if( ALWAYS(rc==SQLITE_OK) ){ nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell); rc = AdjustTree(pRtree, pParent, &leftbbox); + assert( rc==SQLITE_OK ); } - if( rc!=SQLITE_OK ){ + if( NEVER(rc!=SQLITE_OK) ){ goto splitnode_out; } } @@ -194980,7 +198668,7 @@ static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){ */ iNode = sqlite3_column_int64(pRtree->pReadParent, 0); for(pTest=pLeaf; pTest && pTest->iNode!=iNode; pTest=pTest->pParent); - if( !pTest ){ + if( pTest==0 ){ rc2 = nodeAcquire(pRtree, iNode, 0, &pChild->pParent); } } @@ -195011,6 +198699,7 @@ static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){ pParent = pNode->pParent; pNode->pParent = 0; rc = deleteCell(pRtree, pParent, iCell, iHeight+1); + testcase( rc!=SQLITE_OK ); } rc2 = nodeRelease(pRtree, pParent); if( rc==SQLITE_OK ){ @@ -195233,7 +198922,7 @@ static int rtreeInsertCell( } }else{ rc = AdjustTree(pRtree, pNode, pCell); - if( rc==SQLITE_OK ){ + if( ALWAYS(rc==SQLITE_OK) ){ if( iHeight==0 ){ rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode); }else{ @@ -195339,7 +199028,7 @@ static int rtreeDeleteRowid(Rtree *pRtree, sqlite3_int64 iDelete){ int rc2; RtreeNode *pChild = 0; i64 iChild = nodeGetRowid(pRtree, pRoot, 0); - rc = nodeAcquire(pRtree, iChild, pRoot, &pChild); + rc = nodeAcquire(pRtree, iChild, pRoot, &pChild); /* tag-20210916a */ if( rc==SQLITE_OK ){ rc = removeNode(pRtree, pChild, pRtree->iDepth-1); } @@ -195674,7 +199363,7 @@ static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){ char *zSql; sqlite3_stmt *p; int rc; - i64 nRow = 0; + i64 nRow = RTREE_MIN_ROWEST; rc = sqlite3_table_column_metadata( db, pRtree->zDb, "sqlite_stat1",0,0,0,0,0,0 @@ -195691,20 +199380,10 @@ static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){ if( rc==SQLITE_OK ){ if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0); rc = sqlite3_finalize(p); - }else if( rc!=SQLITE_NOMEM ){ - rc = SQLITE_OK; - } - - if( rc==SQLITE_OK ){ - if( nRow==0 ){ - pRtree->nRowEst = RTREE_DEFAULT_ROWEST; - }else{ - pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST); - } } sqlite3_free(zSql); } - + pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST); return rc; } @@ 
-195854,9 +199533,12 @@ static int rtreeSqlInit( sqlite3_str_appendf(p, "UPDATE \"%w\".\"%w_rowid\"SET ", zDb, zPrefix); for(ii=0; iinAux; ii++){ if( ii ) sqlite3_str_append(p, ",", 1); +#ifdef SQLITE_ENABLE_GEOPOLY if( iinAuxNotNull ){ sqlite3_str_appendf(p,"a%d=coalesce(?%d,a%d)",ii,ii+2,ii); - }else{ + }else +#endif + { sqlite3_str_appendf(p,"a%d=?%d",ii,ii+2); } } @@ -196121,6 +199803,7 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ tree.nDim2 = tree.nDim*2; tree.nBytesPerCell = 8 + 8 * tree.nDim; node.zData = (u8 *)sqlite3_value_blob(apArg[1]); + if( node.zData==0 ) return; nData = sqlite3_value_bytes(apArg[1]); if( nData<4 ) return; if( nDataz[0]) ) p->z++; + while( fast_isspace(p->z[0]) ) p->z++; return p->z[0]; } @@ -196950,13 +200632,14 @@ static GeoPoly *geopolyFuncParam( ){ GeoPoly *p = 0; int nByte; + testcase( pCtx==0 ); if( sqlite3_value_type(pVal)==SQLITE_BLOB && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord)) ){ const unsigned char *a = sqlite3_value_blob(pVal); int nVertex; if( a==0 ){ - sqlite3_result_error_nomem(pCtx); + if( pCtx ) sqlite3_result_error_nomem(pCtx); return 0; } nVertex = (a[1]<<16) + (a[2]<<8) + a[3]; @@ -197783,11 +201466,11 @@ static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2){ }else{ /* Remove a segment */ if( pActive==pThisEvent->pSeg ){ - pActive = pActive->pNext; + pActive = ALWAYS(pActive) ? pActive->pNext : 0; }else{ for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){ if( pSeg->pNext==pThisEvent->pSeg ){ - pSeg->pNext = pSeg->pNext->pNext; + pSeg->pNext = ALWAYS(pSeg->pNext) ? pSeg->pNext->pNext : 0; break; } } @@ -198031,6 +201714,7 @@ static int geopolyFilter( RtreeCoord bbox[4]; RtreeConstraint *p; assert( argc==1 ); + assert( argv[0]!=0 ); geopolyBBox(0, argv[0], bbox, &rc); if( rc ){ goto geopoly_filter_end; @@ -198258,6 +201942,7 @@ static int geopolyUpdate( || !sqlite3_value_nochange(aData[2]) /* UPDATE _shape */ || oldRowid!=newRowid) /* Rowid change */ ){ + assert( aData[2]!=0 ); geopolyBBox(0, aData[2], cell.aCoord, &rc); if( rc ){ if( rc==SQLITE_ERROR ){ @@ -198611,7 +202296,10 @@ SQLITE_API int sqlite3_rtree_query_callback( /* Allocate and populate the context object. */ pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback)); - if( !pGeomCtx ) return SQLITE_NOMEM; + if( !pGeomCtx ){ + if( xDestructor ) xDestructor(pContext); + return SQLITE_NOMEM; + } pGeomCtx->xGeom = 0; pGeomCtx->xQueryFunc = xQueryFunc; pGeomCtx->xDestructor = xDestructor; @@ -200182,6 +203870,13 @@ SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName); # define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} #endif +/* +** Name of the URI option that causes RBU to take an exclusive lock as +** part of the incremental checkpoint operation. +*/ +#define RBU_EXCLUSIVE_CHECKPOINT "rbu_exclusive_checkpoint" + + /* ** The rbu_state table is used to save the state of a partially applied ** update so that it can be resumed later. The table consists of integer @@ -201266,7 +204961,9 @@ static void rbuTableType( assert( p->rc==SQLITE_OK ); p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[0], &p->zErrmsg, sqlite3_mprintf( - "SELECT (sql LIKE 'create virtual%%'), rootpage" + "SELECT " + " (sql COLLATE nocase BETWEEN 'CREATE VIRTUAL' AND 'CREATE VIRTUAM')," + " rootpage" " FROM sqlite_schema" " WHERE name=%Q", zTab )); @@ -201626,7 +205323,7 @@ static char *rbuVacuumTableStart( ** the caller has to use an OFFSET clause to extract only the required ** rows from the sourct table, just as it does for an RBU update operation. 
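/*
** Illustrative sketch, not part of the diff above: using the new
** RBU_EXCLUSIVE_CHECKPOINT ("rbu_exclusive_checkpoint") URI option defined
** in the RBU hunks above.  The file names are hypothetical, and this
** assumes the target path is opened as a URI (as rbuOpenDbhandle() does)
** and that the RBU extension header sqlite3rbu.h is available.
*/
#include <stdio.h>
#include "sqlite3rbu.h"

static int apply_rbu_update(void){
  /* Passing the option on the target database name asks RBU to hold an
  ** exclusive lock on the target during the incremental checkpoint phase,
  ** rather than allowing other readers between sqlite3rbu_step() calls. */
  sqlite3rbu *p = sqlite3rbu_open(
      "file:target.db?rbu_exclusive_checkpoint=1",   /* target database */
      "update.rbu",                                  /* RBU update file */
      0);                                            /* default state storage */
  int rc;
  if( p==0 ) return SQLITE_NOMEM;
  while( sqlite3rbu_step(p)==SQLITE_OK ){ /* one unit of work per call */ }
  rc = sqlite3rbu_close(p, 0);
  if( rc!=SQLITE_DONE && rc!=SQLITE_OK ){
    fprintf(stderr, "rbu failed: %d\n", rc);
  }
  return rc;
}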
*/ -char *rbuVacuumIndexStart( +static char *rbuVacuumIndexStart( sqlite3rbu *p, /* RBU handle */ RbuObjIter *pIter /* RBU iterator object */ ){ @@ -202799,7 +206496,7 @@ static RbuState *rbuLoadState(sqlite3rbu *p){ break; case RBU_STATE_OALSZ: - pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1); + pRet->iOalSz = sqlite3_column_int64(pStmt, 1); break; case RBU_STATE_PHASEONESTEP: @@ -202826,13 +206523,19 @@ static RbuState *rbuLoadState(sqlite3rbu *p){ /* ** Open the database handle and attach the RBU database as "rbu". If an ** error occurs, leave an error code and message in the RBU handle. +** +** If argument dbMain is not NULL, then it is a database handle already +** open on the target database. Use this handle instead of opening a new +** one. */ -static void rbuOpenDatabase(sqlite3rbu *p, int *pbRetry){ +static void rbuOpenDatabase(sqlite3rbu *p, sqlite3 *dbMain, int *pbRetry){ assert( p->rc || (p->dbMain==0 && p->dbRbu==0) ); assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 ); + assert( dbMain==0 || rbuIsVacuum(p)==0 ); /* Open the RBU database */ p->dbRbu = rbuOpenDbhandle(p, p->zRbu, 1); + p->dbMain = dbMain; if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); @@ -203198,15 +206901,31 @@ static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){ /* -** Take an EXCLUSIVE lock on the database file. +** Take an EXCLUSIVE lock on the database file. Return SQLITE_OK if +** successful, or an SQLite error code otherwise. */ -static void rbuLockDatabase(sqlite3rbu *p){ - sqlite3_file *pReal = p->pTargetFd->pReal; - assert( p->rc==SQLITE_OK ); - p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_SHARED); - if( p->rc==SQLITE_OK ){ - p->rc = pReal->pMethods->xLock(pReal, SQLITE_LOCK_EXCLUSIVE); +static int rbuLockDatabase(sqlite3 *db){ + int rc = SQLITE_OK; + sqlite3_file *fd = 0; + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + + if( fd->pMethods ){ + rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); + if( rc==SQLITE_OK ){ + rc = fd->pMethods->xLock(fd, SQLITE_LOCK_EXCLUSIVE); + } } + return rc; +} + +/* +** Return true if the database handle passed as the only argument +** was opened with the rbu_exclusive_checkpoint=1 URI parameter +** specified. Or false otherwise. +*/ +static int rbuExclusiveCheckpoint(sqlite3 *db){ + const char *zUri = sqlite3_db_filename(db, 0); + return sqlite3_uri_boolean(zUri, RBU_EXCLUSIVE_CHECKPOINT, 0); } #if defined(_WIN32_WCE) @@ -203264,18 +206983,24 @@ static void rbuMoveOalFile(sqlite3rbu *p){ ** In order to ensure that there are no database readers, an EXCLUSIVE ** lock is obtained here before the *-oal is moved to *-wal. */ - rbuLockDatabase(p); - if( p->rc==SQLITE_OK ){ - rbuFileSuffix3(zBase, zWal); - rbuFileSuffix3(zBase, zOal); + sqlite3 *dbMain = 0; + rbuFileSuffix3(zBase, zWal); + rbuFileSuffix3(zBase, zOal); - /* Re-open the databases. */ - rbuObjIterFinalize(&p->objiter); - sqlite3_close(p->dbRbu); - sqlite3_close(p->dbMain); - p->dbMain = 0; - p->dbRbu = 0; + /* Re-open the databases. */ + rbuObjIterFinalize(&p->objiter); + sqlite3_close(p->dbRbu); + sqlite3_close(p->dbMain); + p->dbMain = 0; + p->dbRbu = 0; + + dbMain = rbuOpenDbhandle(p, p->zTarget, 1); + if( dbMain ){ + assert( p->rc==SQLITE_OK ); + p->rc = rbuLockDatabase(dbMain); + } + if( p->rc==SQLITE_OK ){ #if defined(_WIN32_WCE) { LPWSTR zWideOal; @@ -203302,11 +207027,19 @@ static void rbuMoveOalFile(sqlite3rbu *p){ #else p->rc = rename(zOal, zWal) ? 
SQLITE_IOERR : SQLITE_OK; #endif + } - if( p->rc==SQLITE_OK ){ - rbuOpenDatabase(p, 0); - rbuSetupCheckpoint(p, 0); - } + if( p->rc!=SQLITE_OK + || rbuIsVacuum(p) + || rbuExclusiveCheckpoint(dbMain)==0 + ){ + sqlite3_close(dbMain); + dbMain = 0; + } + + if( p->rc==SQLITE_OK ){ + rbuOpenDatabase(p, dbMain, 0); + rbuSetupCheckpoint(p, 0); } } @@ -204057,9 +207790,9 @@ static sqlite3rbu *openRbuHandle( ** If this is the case, it will have been checkpointed and deleted ** when the handle was closed and a second attempt to open the ** database may succeed. */ - rbuOpenDatabase(p, &bRetry); + rbuOpenDatabase(p, 0, &bRetry); if( bRetry ){ - rbuOpenDatabase(p, 0); + rbuOpenDatabase(p, 0, 0); } } @@ -204154,6 +207887,14 @@ static sqlite3rbu *openRbuHandle( }else if( p->eStage==RBU_STAGE_MOVE ){ /* no-op */ }else if( p->eStage==RBU_STAGE_CKPT ){ + if( !rbuIsVacuum(p) && rbuExclusiveCheckpoint(p->dbMain) ){ + /* If the rbu_exclusive_checkpoint=1 URI parameter was specified + ** and an incremental checkpoint is being resumed, attempt an + ** exclusive lock on the db file. If this fails, so be it. */ + p->eStage = RBU_STAGE_DONE; + rbuLockDatabase(p->dbMain); + p->eStage = RBU_STAGE_CKPT; + } rbuSetupCheckpoint(p, pState); }else if( p->eStage==RBU_STAGE_DONE ){ p->rc = SQLITE_DONE; @@ -204191,7 +207932,6 @@ SQLITE_API sqlite3rbu *sqlite3rbu_open( const char *zState ){ if( zTarget==0 || zRbu==0 ){ return rbuMisuseError(); } - /* TODO: Check that zTarget and zRbu are non-NULL */ return openRbuHandle(zTarget, zRbu, zState); } @@ -205394,6 +209134,15 @@ SQLITE_API sqlite3_int64 sqlite3rbu_temp_size(sqlite3rbu *pRbu){ #if (defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)) \ && !defined(SQLITE_OMIT_VIRTUALTABLE) +/* +** The pager and btree modules arrange objects in memory so that there are +** always approximately 200 bytes of addressable memory following each page +** buffer. This way small buffer overreads caused by corrupt database pages +** do not cause undefined behaviour. This module pads each page buffer +** by the following number of bytes for the same purpose. +*/ +#define DBSTAT_PAGE_PADDING_BYTES 256 + /* ** Page paths: ** @@ -205461,9 +209210,8 @@ struct StatCell { /* Size information for a single btree page */ struct StatPage { u32 iPgno; /* Page number */ - DbPage *pPg; /* Page content */ + u8 *aPg; /* Page buffer from sqlite3_malloc() */ int iCell; /* Current cell */ - char *zPath; /* Path to this page */ /* Variables populated by statDecodePage(): */ @@ -205675,18 +209423,25 @@ static void statClearCells(StatPage *p){ } static void statClearPage(StatPage *p){ + u8 *aPg = p->aPg; statClearCells(p); - sqlite3PagerUnref(p->pPg); sqlite3_free(p->zPath); memset(p, 0, sizeof(StatPage)); + p->aPg = aPg; } static void statResetCsr(StatCursor *pCsr){ int i; - sqlite3_reset(pCsr->pStmt); + /* In some circumstances, specifically if an OOM has occurred, the call + ** to sqlite3_reset() may cause the pager to be reset (emptied). It is + ** important that statClearPage() is called to free any page refs before + ** this happens. dbsqlfuzz 9ed3e4e3816219d3509d711636c38542bf3f40b1. */ for(i=0; iaPage); i++){ statClearPage(&pCsr->aPage[i]); + sqlite3_free(pCsr->aPage[i].aPg); + pCsr->aPage[i].aPg = 0; } + sqlite3_reset(pCsr->pStmt); pCsr->iPage = 0; sqlite3_free(pCsr->zPath); pCsr->zPath = 0; @@ -205751,7 +209506,7 @@ static int statDecodePage(Btree *pBt, StatPage *p){ int isLeaf; int szPage; - u8 *aData = sqlite3PagerGetData(p->pPg); + u8 *aData = p->aPg; u8 *aHdr = &aData[p->iPgno==1 ? 
100 : 0]; p->flags = aHdr[0]; @@ -205822,7 +209577,7 @@ static int statDecodePage(Btree *pBt, StatPage *p){ if( nPayload>(u32)nLocal ){ int j; int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4); - if( iOff+nLocal>nUsable || nPayload>0x7fffffff ){ + if( iOff+nLocal+4>nUsable || nPayload>0x7fffffff ){ goto statPageIsCorrupt; } pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4); @@ -205881,6 +209636,38 @@ static void statSizeAndOffset(StatCursor *pCsr){ } } +/* +** Load a copy of the page data for page iPg into the buffer belonging +** to page object pPg. Allocate the buffer if necessary. Return SQLITE_OK +** if successful, or an SQLite error code otherwise. +*/ +static int statGetPage( + Btree *pBt, /* Load page from this b-tree */ + u32 iPg, /* Page number to load */ + StatPage *pPg /* Load page into this object */ +){ + int pgsz = sqlite3BtreeGetPageSize(pBt); + DbPage *pDbPage = 0; + int rc; + + if( pPg->aPg==0 ){ + pPg->aPg = (u8*)sqlite3_malloc(pgsz + DBSTAT_PAGE_PADDING_BYTES); + if( pPg->aPg==0 ){ + return SQLITE_NOMEM_BKPT; + } + memset(&pPg->aPg[pgsz], 0, DBSTAT_PAGE_PADDING_BYTES); + } + + rc = sqlite3PagerGet(sqlite3BtreePager(pBt), iPg, &pDbPage, 0); + if( rc==SQLITE_OK ){ + const u8 *a = sqlite3PagerGetData(pDbPage); + memcpy(pPg->aPg, a, pgsz); + sqlite3PagerUnref(pDbPage); + } + + return rc; +} + /* ** Move a DBSTAT cursor to the next entry. Normally, the next ** entry will be the next page, but in aggregated mode (pCsr->isAgg!=0), @@ -205899,7 +209686,7 @@ static int statNext(sqlite3_vtab_cursor *pCursor){ pCsr->zPath = 0; statNextRestart: - if( pCsr->aPage[0].pPg==0 ){ + if( pCsr->iPage<0 ){ /* Start measuring space on the next btree */ statResetCounts(pCsr); rc = sqlite3_step(pCsr->pStmt); @@ -205911,7 +209698,7 @@ static int statNext(sqlite3_vtab_cursor *pCursor){ pCsr->isEof = 1; return sqlite3_reset(pCsr->pStmt); } - rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg, 0); + rc = statGetPage(pBt, iRoot, &pCsr->aPage[0]); pCsr->aPage[0].iPgno = iRoot; pCsr->aPage[0].iCell = 0; if( !pCsr->isAgg ){ @@ -205962,9 +209749,8 @@ static int statNext(sqlite3_vtab_cursor *pCursor){ if( !p->iRightChildPg || p->iCell>p->nCell ){ statClearPage(p); - if( pCsr->iPage>0 ){ - pCsr->iPage--; - }else if( pCsr->isAgg ){ + pCsr->iPage--; + if( pCsr->isAgg && pCsr->iPage<0 ){ /* label-statNext-done: When computing aggregate space usage over ** an entire btree, this is the exit point from this function */ return SQLITE_OK; @@ -205983,7 +209769,7 @@ static int statNext(sqlite3_vtab_cursor *pCursor){ }else{ p[1].iPgno = p->aCell[p->iCell].iChildPg; } - rc = sqlite3PagerGet(pPager, p[1].iPgno, &p[1].pPg, 0); + rc = statGetPage(pBt, p[1].iPgno, &p[1]); pCsr->nPage++; p[1].iCell = 0; if( !pCsr->isAgg ){ @@ -206113,6 +209899,7 @@ static int statFilter( } if( rc==SQLITE_OK ){ + pCsr->iPage = -1; rc = statNext(pCursor); } return rc; @@ -206381,6 +210168,7 @@ static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ ){ pIdxInfo->orderByConsumed = 1; } + sqlite3VtabWriteAll(pIdxInfo); return SQLITE_OK; } @@ -207064,7 +210852,7 @@ static int sessionSerializeValue( if( aBuf ){ sessionVarintPut(&aBuf[1], n); - if( n ) memcpy(&aBuf[nVarint + 1], z, n); + if( n>0 ) memcpy(&aBuf[nVarint + 1], z, n); } nByte = 1 + nVarint + n; @@ -207669,16 +211457,32 @@ static int sessionTableInfo( }else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); }else{ + *pazCol = 0; + *pabPK = 0; + *pnCol = 0; + if( pzTab ) *pzTab = 0; return rc; } }else{ zPragma = 
sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); } - if( !zPragma ) return SQLITE_NOMEM; + if( !zPragma ){ + *pazCol = 0; + *pabPK = 0; + *pnCol = 0; + if( pzTab ) *pzTab = 0; + return SQLITE_NOMEM; + } rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0); sqlite3_free(zPragma); - if( rc!=SQLITE_OK ) return rc; + if( rc!=SQLITE_OK ){ + *pazCol = 0; + *pabPK = 0; + *pnCol = 0; + if( pzTab ) *pzTab = 0; + return rc; + } nByte = nThis + 1; while( SQLITE_ROW==sqlite3_step(pStmt) ){ @@ -208096,7 +211900,11 @@ static int sessionFindTable( ){ rc = sqlite3session_attach(pSession, zName); if( rc==SQLITE_OK ){ - for(pRet=pSession->pTable; pRet->pNext; pRet=pRet->pNext); + pRet = pSession->pTable; + while( ALWAYS(pRet) && pRet->pNext ){ + pRet = pRet->pNext; + } + assert( pRet!=0 ); assert( 0==sqlite3_strnicmp(pRet->zName, zName, nName+1) ); } } @@ -208869,6 +212677,7 @@ static int sessionAppendUpdate( int i; /* Used to iterate through columns */ u8 *pCsr = p->aRecord; /* Used to iterate through old.* values */ + assert( abPK!=0 ); sessionAppendByte(pBuf, SQLITE_UPDATE, &rc); sessionAppendByte(pBuf, p->bIndirect, &rc); for(i=0; ipTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){ if( pTab->nEntry ){ const char *zName = pTab->zName; - int nCol; /* Number of columns in table */ - u8 *abPK; /* Primary key array */ + int nCol = 0; /* Number of columns in table */ + u8 *abPK = 0; /* Primary key array */ const char **azCol = 0; /* Table columns */ int i; /* Used to iterate through hash buckets */ sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */ @@ -209231,6 +213042,7 @@ static int sessionGenerateChangeset( sessionAppendCol(&buf, pSel, iCol, &rc); } }else{ + assert( abPK!=0 ); /* Because sessionSelectStmt() returned ok */ rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK); } }else if( p->op!=SQLITE_INSERT ){ @@ -209291,7 +213103,10 @@ SQLITE_API int sqlite3session_changeset( int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ ){ - int rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); + int rc; + + if( pnChangeset==0 || ppChangeset==0 ) return SQLITE_MISUSE; + rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); assert( rc || pnChangeset==0 || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize ); @@ -209306,6 +213121,7 @@ SQLITE_API int sqlite3session_changeset_strm( int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ){ + if( xOutput==0 ) return SQLITE_MISUSE; return sessionGenerateChangeset(pSession, 0, xOutput, pOut, 0, 0); } @@ -209317,6 +213133,7 @@ SQLITE_API int sqlite3session_patchset_strm( int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ){ + if( xOutput==0 ) return SQLITE_MISUSE; return sessionGenerateChangeset(pSession, 1, xOutput, pOut, 0, 0); } @@ -209332,6 +213149,7 @@ SQLITE_API int sqlite3session_patchset( int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ void **ppPatchset /* OUT: Buffer containing changeset */ ){ + if( pnPatchset==0 || ppPatchset==0 ) return SQLITE_MISUSE; return sessionGenerateChangeset(pSession, 1, 0, 0, pnPatchset, ppPatchset); } @@ -210295,11 +214113,11 @@ static int sessionChangesetInvert( } assert( rc==SQLITE_OK ); - if( pnInverted ){ + if( pnInverted && ALWAYS(ppInverted) ){ *pnInverted = sOut.nBuf; *ppInverted = sOut.aBuf; sOut.aBuf = 0; - }else if( sOut.nBuf>0 ){ + }else if( sOut.nBuf>0 && ALWAYS(xOutput!=0) ){ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf); } @@ 
-210755,7 +214573,7 @@ static int sessionBindRow( for(i=0; rc==SQLITE_OK && i0 ) rc = xOutput(pOut, buf.aBuf, buf.nBuf); - }else{ + }else if( ppOut ){ *ppOut = buf.aBuf; - *pnOut = buf.nBuf; + if( pnOut ) *pnOut = buf.nBuf; buf.aBuf = 0; } } @@ -212300,7 +216118,7 @@ static int sessionRebase( if( sOut.nBuf>0 ){ rc = xOutput(pOut, sOut.aBuf, sOut.nBuf); } - }else{ + }else if( ppOut ){ *ppOut = (void*)sOut.aBuf; *pnOut = sOut.nBuf; sOut.aBuf = 0; @@ -213043,8 +216861,20 @@ typedef sqlite3_uint64 u64; #endif #define testcase(x) -#define ALWAYS(x) 1 -#define NEVER(x) 0 + +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) +# define SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS 1 +#endif +#if defined(SQLITE_OMIT_AUXILIARY_SAFETY_CHECKS) +# define ALWAYS(X) (1) +# define NEVER(X) (0) +#elif !defined(NDEBUG) +# define ALWAYS(X) ((X)?1:(assert(0),0)) +# define NEVER(X) ((X)?(assert(0),1):0) +#else +# define ALWAYS(X) (X) +# define NEVER(X) (X) +#endif #define MIN(x,y) (((x) < (y)) ? (x) : (y)) #define MAX(x,y) (((x) > (y)) ? (x) : (y)) @@ -213104,7 +216934,7 @@ SQLITE_API extern int sqlite3_fts5_may_be_corrupt; ** A version of memcmp() that does not cause asan errors if one of the pointer ** parameters is NULL and the number of bytes to compare is zero. */ -#define fts5Memcmp(s1, s2, n) ((n)==0 ? 0 : memcmp((s1), (s2), (n))) +#define fts5Memcmp(s1, s2, n) ((n)<=0 ? 0 : memcmp((s1), (s2), (n))) /* Mark a function parameter as unused, to suppress nuisance compiler ** warnings. */ @@ -213443,6 +217273,9 @@ static void sqlite3Fts5IndexCloseReader(Fts5Index*); */ static const char *sqlite3Fts5IterTerm(Fts5IndexIter*, int*); static int sqlite3Fts5IterNextScan(Fts5IndexIter*); +static void *sqlite3Fts5StructureRef(Fts5Index*); +static void sqlite3Fts5StructureRelease(void*); +static int sqlite3Fts5StructureTest(Fts5Index*, void*); /* @@ -214220,9 +218053,9 @@ struct fts5yyParser { }; typedef struct fts5yyParser fts5yyParser; +/* #include */ #ifndef NDEBUG /* #include */ -/* #include */ static FILE *fts5yyTraceFILE = 0; static char *fts5yyTracePrompt = 0; #endif /* NDEBUG */ @@ -215159,8 +218992,8 @@ static void sqlite3Fts5Parser( fts5yyact = fts5yy_find_shift_action((fts5YYCODETYPE)fts5yymajor,fts5yyact); if( fts5yyact >= fts5YY_MIN_REDUCE ){ unsigned int fts5yyruleno = fts5yyact - fts5YY_MIN_REDUCE; /* Reduce by this rule */ - assert( fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ); #ifndef NDEBUG + assert( fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ); if( fts5yyTraceFILE ){ int fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno]; if( fts5yysize ){ @@ -215258,14 +219091,13 @@ static void sqlite3Fts5Parser( fts5yy_destructor(fts5yypParser, (fts5YYCODETYPE)fts5yymajor, &fts5yyminorunion); fts5yymajor = fts5YYNOCODE; }else{ - while( fts5yypParser->fts5yytos >= fts5yypParser->fts5yystack - && (fts5yyact = fts5yy_find_reduce_action( - fts5yypParser->fts5yytos->stateno, - fts5YYERRORSYMBOL)) > fts5YY_MAX_SHIFTREDUCE - ){ + while( fts5yypParser->fts5yytos > fts5yypParser->fts5yystack ){ + fts5yyact = fts5yy_find_reduce_action(fts5yypParser->fts5yytos->stateno, + fts5YYERRORSYMBOL); + if( fts5yyact<=fts5YY_MAX_SHIFTREDUCE ) break; fts5yy_pop_parser_stack(fts5yypParser); } - if( fts5yypParser->fts5yytos < fts5yypParser->fts5yystack || fts5yymajor==0 ){ + if( fts5yypParser->fts5yytos <= fts5yypParser->fts5yystack || fts5yymajor==0 ){ fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion); fts5yy_parse_failed(fts5yypParser); #ifndef 
fts5YYNOERRORRECOVERY @@ -216128,7 +219960,6 @@ static void sqlite3Fts5BufferAppendBlob( u32 nData, const u8 *pData ){ - assert_nc( *pRc || nData>=0 ); if( nData ){ if( fts5BufferGrow(pRc, pBuf, nData) ) return; memcpy(&pBuf->p[pBuf->n], pData, nData); @@ -216238,9 +220069,8 @@ static int sqlite3Fts5PoslistNext64( return 1; }else{ i64 iOff = *piOff; - int iVal; + u32 iVal; fts5FastGetVarint32(a, i, iVal); - assert( iVal>=0 ); if( iVal<=1 ){ if( iVal==0 ){ *pi = i; @@ -216248,6 +220078,7 @@ static int sqlite3Fts5PoslistNext64( } fts5FastGetVarint32(a, i, iVal); iOff = ((i64)iVal) << 32; + assert( iOff>=0 ); fts5FastGetVarint32(a, i, iVal); if( iVal<2 ){ /* This is a corrupt record. So stop parsing it here. */ @@ -216259,7 +220090,7 @@ static int sqlite3Fts5PoslistNext64( *piOff = (iOff & (i64)0x7FFFFFFF<<32)+((iOff + (iVal-2)) & 0x7FFFFFFF); } *pi = i; - assert( *piOff>=iOff ); + assert_nc( *piOff>=iOff ); return 0; } } @@ -217034,6 +220865,7 @@ static int sqlite3Fts5ConfigParse( z = fts5ConfigSkipWhitespace(z); if( z && *z=='=' ){ bOption = 1; + assert( zOne!=0 ); z++; if( bMustBeCol ) z = 0; } @@ -217050,7 +220882,11 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; }else{ if( bOption ){ - rc = fts5ConfigParseSpecial(pGlobal, pRet, zOne, zTwo?zTwo:"", pzErr); + rc = fts5ConfigParseSpecial(pGlobal, pRet, + ALWAYS(zOne)?zOne:"", + zTwo?zTwo:"", + pzErr + ); }else{ rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr); zOne = 0; @@ -217568,6 +221404,7 @@ static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){ va_list ap; va_start(ap, zFmt); if( pParse->rc==SQLITE_OK ){ + assert( pParse->zErr==0 ); pParse->zErr = sqlite3_vmprintf(zFmt, ap); pParse->rc = SQLITE_ERROR; } @@ -217866,6 +221703,7 @@ static i64 fts5ExprSynonymRowid(Fts5ExprTerm *pTerm, int bDesc, int *pbEof){ int bRetValid = 0; Fts5ExprTerm *p; + assert( pTerm ); assert( pTerm->pSynonym ); assert( bDesc==0 || bDesc==1 ); for(p=pTerm; p; p=p->pSynonym){ @@ -219306,7 +223144,7 @@ static int sqlite3Fts5ExprClonePhrase( sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){ /* All the allocations succeeded. Put the expression object together. */ pNew->pIndex = pExpr->pIndex; pNew->pConfig = pExpr->pConfig; @@ -219577,9 +223415,8 @@ static void sqlite3Fts5ParseSetColset( ){ Fts5Colset *pFree = pColset; if( pParse->pConfig->eDetail==FTS5_DETAIL_NONE ){ - pParse->rc = SQLITE_ERROR; - pParse->zErr = sqlite3_mprintf( - "fts5: column queries are not supported (detail=none)" + sqlite3Fts5ParseError(pParse, + "fts5: column queries are not supported (detail=none)" ); }else{ fts5ParseSetColset(pParse, pExpr, pColset, &pFree); @@ -219753,13 +223590,10 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( || pPhrase->nTerm>1 || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst) ){ - assert( pParse->rc==SQLITE_OK ); - pParse->rc = SQLITE_ERROR; - assert( pParse->zErr==0 ); - pParse->zErr = sqlite3_mprintf( + sqlite3Fts5ParseError(pParse, "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? "phrase": "NEAR" - ); + ); sqlite3_free(pRet); pRet = 0; } @@ -220291,6 +224125,15 @@ struct Fts5PoslistPopulator { int bMiss; }; +/* +** Clear the position lists associated with all phrases in the expression +** passed as the first argument. Argument bLive is true if the expression +** might be pointing to a real entry, otherwise it has just been reset. +** +** At present this function is only used for detail=col and detail=none +** fts5 tables. 
This implies that all phrases must be at most 1 token +** in size, as phrase matches are not supported without detail=full. +*/ static Fts5PoslistPopulator *sqlite3Fts5ExprClearPoslists(Fts5Expr *pExpr, int bLive){ Fts5PoslistPopulator *pRet; pRet = sqlite3_malloc64(sizeof(Fts5PoslistPopulator)*pExpr->nPhrase); @@ -220300,7 +224143,7 @@ static Fts5PoslistPopulator *sqlite3Fts5ExprClearPoslists(Fts5Expr *pExpr, int b for(i=0; inPhrase; i++){ Fts5Buffer *pBuf = &pExpr->apExprPhrase[i]->poslist; Fts5ExprNode *pNode = pExpr->apExprPhrase[i]->pNode; - assert( pExpr->apExprPhrase[i]->nTerm==1 ); + assert( pExpr->apExprPhrase[i]->nTerm<=1 ); if( bLive && (pBuf->n==0 || pNode->iRowid!=pExpr->pRoot->iRowid || pNode->bEof) ){ @@ -220851,7 +224694,7 @@ static int sqlite3Fts5HashWrite( p->bContent = 1; }else{ /* Append a new column value, if necessary */ - assert( iCol>=p->iCol ); + assert_nc( iCol>=p->iCol ); if( iCol!=p->iCol ){ if( pHash->eDetail==FTS5_DETAIL_FULL ){ pPtr[p->nData++] = 0x01; @@ -221656,8 +225499,11 @@ static int fts5BufferCompareBlob( ** res = *pLeft - *pRight */ static int fts5BufferCompare(Fts5Buffer *pLeft, Fts5Buffer *pRight){ - int nCmp = MIN(pLeft->n, pRight->n); - int res = fts5Memcmp(pLeft->p, pRight->p, nCmp); + int nCmp, res; + nCmp = MIN(pLeft->n, pRight->n); + assert( nCmp<=0 || pLeft->p!=0 ); + assert( nCmp<=0 || pRight->p!=0 ); + res = fts5Memcmp(pLeft->p, pRight->p, nCmp); return (res==0 ? (pLeft->n - pRight->n) : res); } @@ -221753,6 +225599,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ return pRet; } + /* ** Release a reference to data record returned by an earlier call to ** fts5DataRead(). @@ -221877,6 +225724,58 @@ static void fts5StructureRef(Fts5Structure *pStruct){ pStruct->nRef++; } +static void *sqlite3Fts5StructureRef(Fts5Index *p){ + fts5StructureRef(p->pStruct); + return (void*)p->pStruct; +} +static void sqlite3Fts5StructureRelease(void *p){ + if( p ){ + fts5StructureRelease((Fts5Structure*)p); + } +} +static int sqlite3Fts5StructureTest(Fts5Index *p, void *pStruct){ + if( p->pStruct!=(Fts5Structure*)pStruct ){ + return SQLITE_ABORT; + } + return SQLITE_OK; +} + +/* +** Ensure that structure object (*pp) is writable. +** +** This function is a no-op if (*pRc) is not SQLITE_OK when it is called. If +** an error occurs, (*pRc) is set to an SQLite error code before returning. +*/ +static void fts5StructureMakeWritable(int *pRc, Fts5Structure **pp){ + Fts5Structure *p = *pp; + if( *pRc==SQLITE_OK && p->nRef>1 ){ + i64 nByte = sizeof(Fts5Structure)+(p->nLevel-1)*sizeof(Fts5StructureLevel); + Fts5Structure *pNew; + pNew = (Fts5Structure*)sqlite3Fts5MallocZero(pRc, nByte); + if( pNew ){ + int i; + memcpy(pNew, p, nByte); + for(i=0; inLevel; i++) pNew->aLevel[i].aSeg = 0; + for(i=0; inLevel; i++){ + Fts5StructureLevel *pLvl = &pNew->aLevel[i]; + nByte = sizeof(Fts5StructureSegment) * pNew->aLevel[i].nSeg; + pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(pRc, nByte); + if( pLvl->aSeg==0 ){ + for(i=0; inLevel; i++){ + sqlite3_free(pNew->aLevel[i].aSeg); + } + sqlite3_free(pNew); + return; + } + memcpy(pLvl->aSeg, p->aLevel[i].aSeg, nByte); + } + p->nRef--; + pNew->nRef = 1; + } + *pp = pNew; + } +} + /* ** Deserialize and return the structure record currently stored in serialized ** form within buffer pData/nData. @@ -221978,9 +225877,11 @@ static int fts5StructureDecode( } /* -** +** Add a level to the Fts5Structure.aLevel[] array of structure object +** (*ppStruct). 
*/ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ + fts5StructureMakeWritable(pRc, ppStruct); if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; @@ -222774,6 +226675,7 @@ static void fts5SegIterInit( if( p->rc==SQLITE_OK ){ pIter->iLeafOffset = 4; + assert( pIter->pLeaf!=0 ); assert_nc( pIter->pLeaf->nn>4 ); assert_nc( fts5LeafFirstTermOff(pIter->pLeaf)==4 ); pIter->iPgidxOff = pIter->pLeaf->szLeaf+1; @@ -222876,8 +226778,12 @@ static void fts5SegIterReverseNewPage(Fts5Index *p, Fts5SegIter *pIter){ int iRowidOff; iRowidOff = fts5LeafFirstRowidOff(pNew); if( iRowidOff ){ - pIter->pLeaf = pNew; - pIter->iLeafOffset = iRowidOff; + if( iRowidOff>=pNew->szLeaf ){ + p->rc = FTS5_CORRUPT; + }else{ + pIter->pLeaf = pNew; + pIter->iLeafOffset = iRowidOff; + } } } @@ -223157,7 +227063,7 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ if( pDlidx ){ int iSegid = pIter->pSeg->iSegid; pgnoLast = fts5DlidxIterPgno(pDlidx); - pLast = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast)); + pLast = fts5LeafRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast)); }else{ Fts5Data *pLeaf = pIter->pLeaf; /* Current leaf data */ @@ -223184,7 +227090,7 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ ** forward to find the page containing the last rowid. */ for(pgno=pIter->iLeafPgno+1; !p->rc && pgno<=pSeg->pgnoLast; pgno++){ i64 iAbs = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno); - Fts5Data *pNew = fts5DataRead(p, iAbs); + Fts5Data *pNew = fts5LeafRead(p, iAbs); if( pNew ){ int iRowid, bTermless; iRowid = fts5LeafFirstRowidOff(pNew); @@ -223215,6 +227121,10 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ pIter->pLeaf = pLast; pIter->iLeafPgno = pgnoLast; iOff = fts5LeafFirstRowidOff(pLast); + if( iOff>pLast->szLeaf ){ + p->rc = FTS5_CORRUPT; + return; + } iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid); pIter->iLeafOffset = iOff; @@ -223223,7 +227133,6 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ }else{ pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast); } - } fts5SegIterReverseInitPage(p, pIter); @@ -223275,21 +227184,20 @@ static void fts5LeafSeek( Fts5SegIter *pIter, /* Iterator to seek */ const u8 *pTerm, int nTerm /* Term to search for */ ){ - int iOff; + u32 iOff; const u8 *a = pIter->pLeaf->p; - int szLeaf = pIter->pLeaf->szLeaf; - int n = pIter->pLeaf->nn; + u32 n = (u32)pIter->pLeaf->nn; u32 nMatch = 0; u32 nKeep = 0; u32 nNew = 0; u32 iTermOff; - int iPgidx; /* Current offset in pgidx */ + u32 iPgidx; /* Current offset in pgidx */ int bEndOfPage = 0; assert( p->rc==SQLITE_OK ); - iPgidx = szLeaf; + iPgidx = (u32)pIter->pLeaf->szLeaf; iPgidx += fts5GetVarint32(&a[iPgidx], iTermOff); iOff = iTermOff; if( iOff>n ){ @@ -223355,15 +227263,15 @@ static void fts5LeafSeek( if( pIter->pLeaf==0 ) return; a = pIter->pLeaf->p; if( fts5LeafIsTermless(pIter->pLeaf)==0 ){ - iPgidx = pIter->pLeaf->szLeaf; + iPgidx = (u32)pIter->pLeaf->szLeaf; iPgidx += fts5GetVarint32(&pIter->pLeaf->p[iPgidx], iOff); - if( iOff<4 || iOff>=pIter->pLeaf->szLeaf ){ + if( iOff<4 || (i64)iOff>=pIter->pLeaf->szLeaf ){ p->rc = FTS5_CORRUPT; return; }else{ nKeep = 0; iTermOff = iOff; - n = pIter->pLeaf->nn; + n = (u32)pIter->pLeaf->nn; iOff += fts5GetVarint32(&a[iOff], nNew); break; } @@ -223731,7 +227639,7 @@ static void fts5SegIterGotoPage( fts5SegIterNextPage(p, pIter); assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno ); - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && 
ALWAYS(pIter->pLeaf!=0) ){ int iOff; u8 *a = pIter->pLeaf->p; int n = pIter->pLeaf->szLeaf; @@ -224163,7 +228071,11 @@ static void fts5SegiterPoslist( Fts5Colset *pColset, Fts5Buffer *pBuf ){ + assert( pBuf!=0 ); + assert( pSeg!=0 ); if( 0==fts5BufferGrow(&p->rc, pBuf, pSeg->nPos+FTS5_DATA_ZERO_PADDING) ){ + assert( pBuf->p!=0 ); + assert( pBuf->nSpace >= pBuf->n+pSeg->nPos+FTS5_DATA_ZERO_PADDING ); memset(&pBuf->p[pBuf->n+pSeg->nPos], 0, FTS5_DATA_ZERO_PADDING); if( pColset==0 ){ fts5ChunkIterate(p, pSeg, (void*)pBuf, fts5PoslistCallback); @@ -224387,6 +228299,7 @@ static void fts5IterSetOutputs_Full(Fts5Iter *pIter, Fts5SegIter *pSeg){ } static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){ + assert( pIter!=0 || (*pRc)!=SQLITE_OK ); if( *pRc==SQLITE_OK ){ Fts5Config *pConfig = pIter->pIndex->pConfig; if( pConfig->eDetail==FTS5_DETAIL_NONE ){ @@ -224458,7 +228371,10 @@ static void fts5MultiIterNew( } } *ppOut = pNew = fts5MultiIterAlloc(p, nSeg); - if( pNew==0 ) return; + if( pNew==0 ){ + assert( p->rc!=SQLITE_OK ); + goto fts5MultiIterNew_post_check; + } pNew->bRev = (0!=(flags & FTS5INDEX_QUERY_DESC)); pNew->bSkipEmpty = (0!=(flags & FTS5INDEX_QUERY_SKIPEMPTY)); pNew->pColset = pColset; @@ -224522,6 +228438,10 @@ static void fts5MultiIterNew( fts5MultiIterFree(pNew); *ppOut = 0; } + +fts5MultiIterNew_post_check: + assert( (*ppOut)!=0 || p->rc!=SQLITE_OK ); + return; } /* @@ -224569,7 +228489,8 @@ static void fts5MultiIterNew2( ** False otherwise. */ static int fts5MultiIterEof(Fts5Index *p, Fts5Iter *pIter){ - assert( p->rc + assert( pIter!=0 || p->rc!=SQLITE_OK ); + assert( p->rc!=SQLITE_OK || (pIter->aSeg[ pIter->aFirst[1].iFirst ].pLeaf==0)==pIter->base.bEof ); return (p->rc || pIter->base.bEof); @@ -225373,6 +229294,7 @@ static void fts5IndexMergeLevel( ** and last leaf page number at the same time. */ fts5WriteFinish(p, &writer, &pSeg->pgnoLast); + assert( pIter!=0 || p->rc!=SQLITE_OK ); if( fts5MultiIterEof(p, pIter) ){ int i; @@ -225473,7 +229395,7 @@ static void fts5IndexAutomerge( Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */ int nLeaf /* Number of output leaves just written */ ){ - if( p->rc==SQLITE_OK && p->pConfig->nAutomerge>0 ){ + if( p->rc==SQLITE_OK && p->pConfig->nAutomerge>0 && ALWAYS((*ppStruct)!=0) ){ Fts5Structure *pStruct = *ppStruct; u64 nWrite; /* Initial value of write-counter */ int nWork; /* Number of work-quanta to perform */ @@ -226542,7 +230464,7 @@ static int sqlite3Fts5IndexQuery( if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ - if( nToken ) memcpy(&buf.p[1], pToken, nToken); + if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); /* Figure out which index to search and set iIdx accordingly. 
If this ** is a prefix query for which there is no prefix index, set iIdx to @@ -226583,11 +230505,15 @@ static int sqlite3Fts5IndexQuery( /* Scan multiple terms in the main index */ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); - assert( p->rc!=SQLITE_OK || pRet->pColset==0 ); - fts5IterSetOutputCb(&p->rc, pRet); - if( p->rc==SQLITE_OK ){ - Fts5SegIter *pSeg = &pRet->aSeg[pRet->aFirst[1].iFirst]; - if( pSeg->pLeaf ) pRet->xSetOutputs(pRet, pSeg); + if( pRet==0 ){ + assert( p->rc!=SQLITE_OK ); + }else{ + assert( pRet->pColset==0 ); + fts5IterSetOutputCb(&p->rc, pRet); + if( p->rc==SQLITE_OK ){ + Fts5SegIter *pSeg = &pRet->aSeg[pRet->aFirst[1].iFirst]; + if( pSeg->pLeaf ) pRet->xSetOutputs(pRet, pSeg); + } } } @@ -226835,7 +230761,7 @@ static int fts5QueryCksum( Fts5IndexIter *pIter = 0; int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter); - while( rc==SQLITE_OK && 0==sqlite3Fts5IterEof(pIter) ){ + while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){ i64 rowid = pIter->iRowid; if( eDetail==FTS5_DETAIL_NONE ){ @@ -227200,6 +231126,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum Fts5Buffer poslist = {0,0,0}; /* Buffer used to hold a poslist */ Fts5Iter *pIter; /* Used to iterate through entire index */ Fts5Structure *pStruct; /* Index structure */ + int iLvl, iSeg; #ifdef SQLITE_DEBUG /* Used by extra internal tests only run if NDEBUG is not defined */ @@ -227210,15 +231137,16 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum /* Load the FTS index structure */ pStruct = fts5StructureRead(p); + if( pStruct==0 ){ + assert( p->rc!=SQLITE_OK ); + return fts5IndexReturn(p); + } /* Check that the internal nodes of each segment match the leaves */ - if( pStruct ){ - int iLvl, iSeg; - for(iLvl=0; iLvlnLevel; iLvl++){ - for(iSeg=0; iSegaLevel[iLvl].nSeg; iSeg++){ - Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; - fts5IndexIntegrityCheckSegment(p, pSeg); - } + for(iLvl=0; iLvlnLevel; iLvl++){ + for(iSeg=0; iSegaLevel[iLvl].nSeg; iSeg++){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + fts5IndexIntegrityCheckSegment(p, pSeg); } } @@ -228595,7 +232523,7 @@ static int fts5SorterNext(Fts5Cursor *pCsr){ rc = sqlite3_step(pSorter->pStmt); if( rc==SQLITE_DONE ){ rc = SQLITE_OK; - CsrFlagSet(pCsr, FTS5CSR_EOF); + CsrFlagSet(pCsr, FTS5CSR_EOF|FTS5CSR_REQUIRE_CONTENT); }else if( rc==SQLITE_ROW ){ const u8 *a; const u8 *aBlob; @@ -229165,7 +233093,8 @@ static int fts5FilterMethod( pTab->pStorage, fts5StmtType(pCsr), &pCsr->pStmt, &pTab->p.base.zErrMsg ); if( rc==SQLITE_OK ){ - if( pCsr->ePlan==FTS5_PLAN_ROWID ){ + if( pRowidEq!=0 ){ + assert( pCsr->ePlan==FTS5_PLAN_ROWID ); sqlite3_bind_value(pCsr->pStmt, 1, pRowidEq); }else{ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iFirstRowid); @@ -230583,7 +234512,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2022-05-06 15:25:27 78d9c993d404cdfaa7fdd2973fa1052e3da9f66215cff9c5540ebe55c407d9fe", -1, SQLITE_TRANSIENT); } /* @@ -231134,12 +235063,16 @@ static int fts5StorageDeleteFromIndex( if( pConfig->abUnindexed[iCol-1]==0 ){ const char *zText; int nText; + assert( pSeek==0 || apVal==0 ); + assert( pSeek!=0 || apVal!=0 ); if( pSeek ){ zText = (const 
char*)sqlite3_column_text(pSeek, iCol); nText = sqlite3_column_bytes(pSeek, iCol); - }else{ + }else if( ALWAYS(apVal) ){ zText = (const char*)sqlite3_value_text(apVal[iCol-1]); nText = sqlite3_value_bytes(apVal[iCol-1]); + }else{ + continue; } ctx.szCol = 0; rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, @@ -231775,8 +235708,9 @@ static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol){ assert( p->pConfig->bColumnsize ); rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0); - if( rc==SQLITE_OK ){ + if( pLookup ){ int bCorrupt = 1; + assert( rc==SQLITE_OK ); sqlite3_bind_int64(pLookup, 1, iRowid); if( SQLITE_ROW==sqlite3_step(pLookup) ){ const u8 *aBlob = sqlite3_column_blob(pLookup, 0); @@ -231789,6 +235723,8 @@ static int sqlite3Fts5StorageDocsize(Fts5Storage *p, i64 iRowid, int *aCol){ if( bCorrupt && rc==SQLITE_OK ){ rc = FTS5_CORRUPT; } + }else{ + assert( rc!=SQLITE_OK ); } return rc; @@ -234479,6 +238415,7 @@ struct Fts5VocabCursor { int bEof; /* True if this cursor is at EOF */ Fts5IndexIter *pIter; /* Term/rowid iterator object */ + void *pStruct; /* From sqlite3Fts5StructureRef() */ int nLeTerm; /* Size of zLeTerm in bytes */ char *zLeTerm; /* (term <= $zLeTerm) paramater, or NULL */ @@ -234792,7 +238729,7 @@ static int fts5VocabOpenMethod( } if( rc==SQLITE_OK ){ - int nByte = pFts5->pConfig->nCol * sizeof(i64)*2 + sizeof(Fts5VocabCursor); + i64 nByte = pFts5->pConfig->nCol * sizeof(i64)*2 + sizeof(Fts5VocabCursor); pCsr = (Fts5VocabCursor*)sqlite3Fts5MallocZero(&rc, nByte); } @@ -234812,6 +238749,8 @@ static int fts5VocabOpenMethod( static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){ pCsr->rowid = 0; sqlite3Fts5IterClose(pCsr->pIter); + sqlite3Fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; pCsr->pIter = 0; sqlite3_free(pCsr->zLeTerm); pCsr->nLeTerm = -1; @@ -234889,9 +238828,11 @@ static int fts5VocabInstanceNext(Fts5VocabCursor *pCsr){ static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){ Fts5VocabCursor *pCsr = (Fts5VocabCursor*)pCursor; Fts5VocabTable *pTab = (Fts5VocabTable*)pCursor->pVtab; - int rc = SQLITE_OK; int nCol = pCsr->pFts5->pConfig->nCol; + int rc; + rc = sqlite3Fts5StructureTest(pCsr->pFts5->pIndex, pCsr->pStruct); + if( rc!=SQLITE_OK ) return rc; pCsr->rowid++; if( pTab->eType==FTS5_VOCAB_INSTANCE ){ @@ -235065,6 +239006,9 @@ static int fts5VocabFilterMethod( if( rc==SQLITE_OK ){ Fts5Index *pIndex = pCsr->pFts5->pIndex; rc = sqlite3Fts5IndexQuery(pIndex, zTerm, nTerm, f, 0, &pCsr->pIter); + if( rc==SQLITE_OK ){ + pCsr->pStruct = sqlite3Fts5StructureRef(pIndex); + } } if( rc==SQLITE_OK && eType==FTS5_VOCAB_INSTANCE ){ rc = fts5VocabInstanceNewTerm(pCsr); @@ -235509,10 +239453,6 @@ SQLITE_API int sqlite3_stmt_init( #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */ /************** End of stmt.c ************************************************/ -#if __LINE__!=235511 -#undef SQLITE_SOURCE_ID -#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafaalt2" -#endif /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } /************************** End of sqlite3.c ******************************/ diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index a24716c..1c267a4 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -44,7 +44,30 
@@ extern "C" { /* -** Provide the ability to override linkage features of the interface. +** Facilitate override of interface linkage and calling conventions. +** Be aware that these macros may not be used within this particular +** translation of the amalgamation and its associated header file. +** +** The SQLITE_EXTERN and SQLITE_API macros are used to instruct the +** compiler that the target identifier should have external linkage. +** +** The SQLITE_CDECL macro is used to set the calling convention for +** public functions that accept a variable number of arguments. +** +** The SQLITE_APICALL macro is used to set the calling convention for +** public functions that accept a fixed number of arguments. +** +** The SQLITE_STDCALL macro is no longer used and is now deprecated. +** +** The SQLITE_CALLBACK macro is used to set the calling convention for +** function pointers. +** +** The SQLITE_SYSAPI macro is used to set the calling convention for +** functions provided by the operating system. +** +** Currently, the SQLITE_CDECL, SQLITE_APICALL, SQLITE_CALLBACK, and +** SQLITE_SYSAPI macros are used only when building for environments +** that require non-default calling conventions. */ #ifndef SQLITE_EXTERN # define SQLITE_EXTERN extern @@ -124,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.36.0" -#define SQLITE_VERSION_NUMBER 3036000 -#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5" +#define SQLITE_VERSION "3.38.5" +#define SQLITE_VERSION_NUMBER 3038005 +#define SQLITE_SOURCE_ID "2022-05-06 15:25:27 78d9c993d404cdfaa7fdd2973fa1052e3da9f66215cff9c5540ebe55c407d9fe" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -538,12 +561,13 @@ SQLITE_API int sqlite3_exec( #define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) #define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8)) +#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) -#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) +#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) /* internal use only */ /* ** CAPI3REF: Flags For File Open Operations @@ -551,6 +575,19 @@ SQLITE_API int sqlite3_exec( ** These bit values are intended for use in the ** 3rd parameter to the [sqlite3_open_v2()] interface and ** in the 4th parameter to the [sqlite3_vfs.xOpen] method. +** +** Only those flags marked as "Ok for sqlite3_open_v2()" may be +** used as the third argument to the [sqlite3_open_v2()] interface. +** The other flags have historically been ignored by sqlite3_open_v2(), +** though future versions of SQLite might change so that an error is +** raised if any of the disallowed bits are passed into sqlite3_open_v2(). +** Applications should not depend on the historical behavior. +** +** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into +** [sqlite3_open_v2()] does *not* cause the underlying database file +** to be opened using O_EXCL. Passing SQLITE_OPEN_EXCLUSIVE into +** [sqlite3_open_v2()] has historically be a no-op and might become an +** error in future versions of SQLite. 
*/ #define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ @@ -573,6 +610,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ #define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_EXRESCODE 0x02000000 /* Extended result codes */ /* Reserved: 0x00F00000 */ /* Legacy compatibility: */ @@ -2465,11 +2503,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** CAPI3REF: Count The Number Of Rows Modified ** METHOD: sqlite3 ** -** ^This function returns the number of rows modified, inserted or +** ^These functions return the number of rows modified, inserted or ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. -** ^Executing any other type of SQL statement does not modify the value -** returned by this function. +** The two functions are identical except for the type of the return value +** and that if the number of rows modified by the most recent INSERT, UPDATE +** or DELETE is greater than the maximum value supported by type "int", then +** the return value of sqlite3_changes() is undefined. ^Executing any other +** type of SQL statement does not modify the value returned by these functions. ** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], @@ -2518,16 +2559,21 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** */ SQLITE_API int sqlite3_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_changes64(sqlite3*); /* ** CAPI3REF: Total Number Of Rows Modified ** METHOD: sqlite3 ** -** ^This function returns the total number of rows inserted, modified or +** ^These functions return the total number of rows inserted, modified or ** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed ** since the database connection was opened, including those executed as -** part of trigger programs. ^Executing any other type of SQL statement -** does not affect the value returned by sqlite3_total_changes(). +** part of trigger programs. The two functions are identical except for the +** type of the return value and that if the number of rows modified by the +** connection exceeds the maximum value supported by type "int", then +** the return value of sqlite3_total_changes() is undefined. ^Executing +** any other type of SQL statement does not affect the value returned by +** sqlite3_total_changes(). ** ** ^Changes made as part of [foreign key actions] are included in the ** count, but those made as part of REPLACE constraint resolution are @@ -2555,6 +2601,7 @@ SQLITE_API int sqlite3_changes(sqlite3*); ** */ SQLITE_API int sqlite3_total_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*); /* ** CAPI3REF: Interrupt A Long-Running Query @@ -3384,6 +3431,14 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** +** [[OPEN_EXRESCODE]] ^(
[SQLITE_OPEN_EXRESCODE]
+**
The database connection comes up in "extended result code mode". +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database +** connection as soon as the connection is created. In addition to setting +** the extended result code mode, this flag also causes [sqlite3_open_v2()] +** to return an extended result code.
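** A minimal usage sketch (illustrative only, not part of the SQLite
** documentation): open a connection in extended result code mode. The
** file name "data.db" is an assumed example value.
**
**     sqlite3 *db = 0;
**     int rc = sqlite3_open_v2("data.db", &db,
**         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_EXRESCODE,
**         0);                    // NULL VFS name selects the default VFS
**     // On failure rc is already an extended code, e.g. SQLITE_CANTOPEN_ISDIR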
+** ** [[OPEN_NOFOLLOW]] ^(
[SQLITE_OPEN_NOFOLLOW]
**
The database filename is not allowed to be a symbolic link
** )^ @@ -3391,7 +3446,15 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** If the 3rd parameter to sqlite3_open_v2() is not one of the ** required combinations shown above optionally combined with other ** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. +** then the behavior is undefined. Historic versions of SQLite +** have silently ignored surplus bits in the flags parameter to +** sqlite3_open_v2(), however that behavior might not be carried through +** into future versions of SQLite and so applications should not rely +** upon it. Note in particular that the SQLITE_OPEN_EXCLUSIVE flag is a no-op +** for sqlite3_open_v2(). The SQLITE_OPEN_EXCLUSIVE does *not* cause +** the open to fail if the database already exists. The SQLITE_OPEN_EXCLUSIVE +** flag is intended for use by the [sqlite3_vfs|VFS interface] only, and not +** by sqlite3_open_v2(). ** ** ^The fourth parameter to sqlite3_open_v2() is the name of the ** [sqlite3_vfs] object that defines the operating system interface that @@ -3762,13 +3825,14 @@ SQLITE_API void sqlite3_free_filename(char*); ** sqlite3_extended_errcode() might change with each API call. ** Except, there are some interfaces that are guaranteed to never ** change the value of the error code. The error-code preserving -** interfaces are: +** interfaces include the following: ** **
    **
  • sqlite3_errcode() **
  • sqlite3_extended_errcode() **
  • sqlite3_errmsg() **
  • sqlite3_errmsg16() +**
  • sqlite3_error_offset() **
** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language @@ -3783,6 +3847,13 @@ SQLITE_API void sqlite3_free_filename(char*); ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** +** ^If the most recent error references a specific token in the input +** SQL, the sqlite3_error_offset() interface returns the byte offset +** of the start of that token. ^The byte offset returned by +** sqlite3_error_offset() assumes that the input SQL is UTF8. +** ^If the most recent error does not reference a specific token in the input +** SQL, then the sqlite3_error_offset() function returns -1. +** ** When the serialized [threading mode] is in use, it might be the ** case that a second error occurs on a separate thread in between ** the time of the first error and the call to these interfaces. @@ -3802,6 +3873,7 @@ SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); SQLITE_API const char *sqlite3_errmsg(sqlite3*); SQLITE_API const void *sqlite3_errmsg16(sqlite3*); SQLITE_API const char *sqlite3_errstr(int); +SQLITE_API int sqlite3_error_offset(sqlite3 *db); /* ** CAPI3REF: Prepared Statement Object @@ -4159,12 +4231,17 @@ SQLITE_API int sqlite3_prepare16_v3( ** are managed by SQLite and are automatically freed when the prepared ** statement is finalized. ** ^The string returned by sqlite3_expanded_sql(P), on the other hand, -** is obtained from [sqlite3_malloc()] and must be free by the application +** is obtained from [sqlite3_malloc()] and must be freed by the application ** by passing it to [sqlite3_free()]. +** +** ^The sqlite3_normalized_sql() interface is only available if +** the [SQLITE_ENABLE_NORMALIZE] compile-time option is defined. */ SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); +#ifdef SQLITE_ENABLE_NORMALIZE SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); +#endif /* ** CAPI3REF: Determine If An SQL Statement Writes The Database @@ -4208,6 +4285,10 @@ SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); ** be false. ^Similarly, a CREATE TABLE IF NOT EXISTS statement is a ** read-only no-op if the table already exists, but ** sqlite3_stmt_readonly() still returns false for such a statement. +** +** ^If prepared statement X is an [EXPLAIN] or [EXPLAIN QUERY PLAN] +** statement, then sqlite3_stmt_readonly(X) returns the same value as +** if the EXPLAIN or EXPLAIN QUERY PLAN prefix were omitted. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -4276,6 +4357,8 @@ SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); ** ** ^The sqlite3_value objects that are passed as parameters into the ** implementation of [application-defined SQL functions] are protected. +** ^The sqlite3_value objects returned by [sqlite3_vtab_rhs_value()] +** are protected. ** ^The sqlite3_value object returned by ** [sqlite3_column_value()] is unprotected. ** Unprotected sqlite3_value objects may only be used as arguments @@ -4897,6 +4980,10 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** even empty strings, are always zero-terminated. ^The return ** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. ** +** ^Strings returned by sqlite3_column_text16() always have the endianness +** which is native to the platform, regardless of the text encoding set +** for the database. +** ** Warning: ^The object returned by [sqlite3_column_value()] is an ** [unprotected sqlite3_value] object. 
In a multithreaded environment, ** an unprotected sqlite3_value object may only be used safely with @@ -4910,7 +4997,7 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** [application-defined SQL functions] or [virtual tables], not within ** top-level application code. ** -** The these routines may attempt to convert the datatype of the result. +** These routines may attempt to convert the datatype of the result. ** ^For example, if the internal representation is FLOAT and a text result ** is requested, [sqlite3_snprintf()] is used internally to perform the ** conversion automatically. ^(The following table details the conversions @@ -4935,7 +5022,7 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); ** TEXT BLOB No change ** BLOB INTEGER [CAST] to INTEGER ** BLOB FLOAT [CAST] to REAL -** BLOB TEXT Add a zero terminator if needed +** BLOB TEXT [CAST] to TEXT, ensure zero terminator ** ** )^ ** @@ -6348,6 +6435,72 @@ SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); +/* +** CAPI3REF: Autovacuum Compaction Amount Callback +** METHOD: sqlite3 +** +** ^The sqlite3_autovacuum_pages(D,C,P,X) interface registers a callback +** function C that is invoked prior to each autovacuum of the database +** file. ^The callback is passed a copy of the generic data pointer (P), +** the schema-name of the attached database that is being autovacuumed, +** the the size of the database file in pages, the number of free pages, +** and the number of bytes per page, respectively. The callback should +** return the number of free pages that should be removed by the +** autovacuum. ^If the callback returns zero, then no autovacuum happens. +** ^If the value returned is greater than or equal to the number of +** free pages, then a complete autovacuum happens. +** +**

^If there are multiple ATTACH-ed database files that are being +** modified as part of a transaction commit, then the autovacuum pages +** callback is invoked separately for each file. +** +**

The callback is not reentrant. The callback function should +** not attempt to invoke any other SQLite interface. If it does, bad +** things may happen, including segmentation faults and corrupt database +** files. The callback function should be a simple function that +** does some arithmetic on its input parameters and returns a result. +** +** ^The X parameter to sqlite3_autovacuum_pages(D,C,P,X) is an optional +** destructor for the P parameter. ^If X is not NULL, then X(P) is +** invoked whenever the database connection closes or when the callback +** is overwritten by another invocation of sqlite3_autovacuum_pages(). +** +**

^There is only one autovacuum pages callback per database connection. +** ^Each call to the sqlite3_autovacuum_pages() interface overrides all +** previous invocations for that database connection. ^If the callback +** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, +** then the autovacuum steps callback is cancelled. The return value +** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might +** be some other error code if something goes wrong. The current +** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other +** return codes might be added in future releases. +** +**

If no autovacuum pages callback is specified (the usual case) or +** a NULL pointer is provided for the callback, +** then the default behavior is to vacuum all free pages. So, in other +** words, the default behavior is the same as if the callback function +** were something like this: +** +**

+**     unsigned int demonstration_autovac_pages_callback(
+**       void *pClientData,
+**       const char *zSchema,
+**       unsigned int nDbPage,
+**       unsigned int nFreePage,
+**       unsigned int nBytePerPage
+**     ){
+**       return nFreePage;
+**     }
+** 
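** An illustrative registration sketch (an assumption for this example, not
** taken from the SQLite documentation; "db" is assumed to be an open
** database handle):
**
**     sqlite3_autovacuum_pages(db, demonstration_autovac_pages_callback,
**                              0, 0);  // no client data, no destructor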
+*/ +SQLITE_API int sqlite3_autovacuum_pages( + sqlite3 *db, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, + void(*)(void*) +); + + /* ** CAPI3REF: Data Change Notification Callbacks ** METHOD: sqlite3 @@ -6989,24 +7142,56 @@ struct sqlite3_index_info { ** ** These macros define the allowed values for the ** [sqlite3_index_info].aConstraint[].op field. Each value represents -** an operator that is part of a constraint term in the wHERE clause of +** an operator that is part of a constraint term in the WHERE clause of ** a query that uses a [virtual table]. -*/ -#define SQLITE_INDEX_CONSTRAINT_EQ 2 -#define SQLITE_INDEX_CONSTRAINT_GT 4 -#define SQLITE_INDEX_CONSTRAINT_LE 8 -#define SQLITE_INDEX_CONSTRAINT_LT 16 -#define SQLITE_INDEX_CONSTRAINT_GE 32 -#define SQLITE_INDEX_CONSTRAINT_MATCH 64 -#define SQLITE_INDEX_CONSTRAINT_LIKE 65 -#define SQLITE_INDEX_CONSTRAINT_GLOB 66 -#define SQLITE_INDEX_CONSTRAINT_REGEXP 67 -#define SQLITE_INDEX_CONSTRAINT_NE 68 -#define SQLITE_INDEX_CONSTRAINT_ISNOT 69 -#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 -#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 -#define SQLITE_INDEX_CONSTRAINT_IS 72 -#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150 +** +** ^The left-hand operand of the operator is given by the corresponding +** aConstraint[].iColumn field. ^An iColumn of -1 indicates the left-hand +** operand is the rowid. +** The SQLITE_INDEX_CONSTRAINT_LIMIT and SQLITE_INDEX_CONSTRAINT_OFFSET +** operators have no left-hand operand, and so for those operators the +** corresponding aConstraint[].iColumn is meaningless and should not be +** used. +** +** All operator values from SQLITE_INDEX_CONSTRAINT_FUNCTION through +** value 255 are reserved to represent functions that are overloaded +** by the [xFindFunction|xFindFunction method] of the virtual table +** implementation. +** +** The right-hand operands for each constraint might be accessible using +** the [sqlite3_vtab_rhs_value()] interface. Usually the right-hand +** operand is only available if it appears as a single constant literal +** in the input SQL. If the right-hand operand is another column or an +** expression (even a constant expression) or a parameter, then the +** sqlite3_vtab_rhs_value() probably will not be able to extract it. +** ^The SQLITE_INDEX_CONSTRAINT_ISNULL and +** SQLITE_INDEX_CONSTRAINT_ISNOTNULL operators have no right-hand operand +** and hence calls to sqlite3_vtab_rhs_value() for those operators will +** always return SQLITE_NOTFOUND. +** +** The collating sequence to be used for comparison can be found using +** the [sqlite3_vtab_collation()] interface. For most real-world virtual +** tables, the collating sequence of constraints does not matter (for example +** because the constraints are numeric) and so the sqlite3_vtab_collation() +** interface is no commonly needed. 
+*/ +#define SQLITE_INDEX_CONSTRAINT_EQ 2 +#define SQLITE_INDEX_CONSTRAINT_GT 4 +#define SQLITE_INDEX_CONSTRAINT_LE 8 +#define SQLITE_INDEX_CONSTRAINT_LT 16 +#define SQLITE_INDEX_CONSTRAINT_GE 32 +#define SQLITE_INDEX_CONSTRAINT_MATCH 64 +#define SQLITE_INDEX_CONSTRAINT_LIKE 65 +#define SQLITE_INDEX_CONSTRAINT_GLOB 66 +#define SQLITE_INDEX_CONSTRAINT_REGEXP 67 +#define SQLITE_INDEX_CONSTRAINT_NE 68 +#define SQLITE_INDEX_CONSTRAINT_ISNOT 69 +#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 +#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 +#define SQLITE_INDEX_CONSTRAINT_IS 72 +#define SQLITE_INDEX_CONSTRAINT_LIMIT 73 +#define SQLITE_INDEX_CONSTRAINT_OFFSET 74 +#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150 /* ** CAPI3REF: Register A Virtual Table Implementation @@ -7035,7 +7220,7 @@ struct sqlite3_index_info { ** destructor. ** ** ^If the third parameter (the pointer to the sqlite3_module object) is -** NULL then no new module is create and any existing modules with the +** NULL then no new module is created and any existing modules with the ** same name are dropped. ** ** See also: [sqlite3_drop_modules()] @@ -7811,7 +7996,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_SEEK_COUNT 30 #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 -#define SQLITE_TESTCTRL_LAST 32 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_LOGEST 33 +#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -8334,6 +8520,16 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** The counter is incremented on the first [sqlite3_step()] call of each ** cycle. ** +** [[SQLITE_STMTSTATUS_FILTER_MISS]] +** [[SQLITE_STMTSTATUS_FILTER HIT]] +**
SQLITE_STMTSTATUS_FILTER_HIT
+** SQLITE_STMTSTATUS_FILTER_MISS
+**
^SQLITE_STMTSTATUS_FILTER_HIT is the number of times that a join +** step was bypassed because a Bloom filter returned not-found. The +** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of +** times that the Bloom filter returned a find, and thus the join step +** had to be processed as normal. +** ** [[SQLITE_STMTSTATUS_MEMUSED]]
SQLITE_STMTSTATUS_MEMUSED
**
^This is the approximate number of bytes of heap memory ** used to store the prepared statement. ^This value is not actually @@ -8348,6 +8544,8 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); #define SQLITE_STMTSTATUS_VM_STEP 4 #define SQLITE_STMTSTATUS_REPREPARE 5 #define SQLITE_STMTSTATUS_RUN 6 +#define SQLITE_STMTSTATUS_FILTER_MISS 7 +#define SQLITE_STMTSTATUS_FILTER_HIT 8 #define SQLITE_STMTSTATUS_MEMUSED 99 /* @@ -9011,8 +9209,9 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** ** A single database handle may have at most a single write-ahead log callback ** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. ^Note that the -** [sqlite3_wal_autocheckpoint()] interface and the +** previously registered write-ahead log callback. ^The return value is +** a copy of the third parameter from the previous call, if any, or 0. +** ^Note that the [sqlite3_wal_autocheckpoint()] interface and the ** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will ** overwrite any prior [sqlite3_wal_hook()] settings. */ @@ -9315,19 +9514,269 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); /* ** CAPI3REF: Determine The Collation For a Virtual Table Constraint +** METHOD: sqlite3_index_info ** ** This function may only be called from within a call to the [xBestIndex] -** method of a [virtual table]. +** method of a [virtual table]. This function returns a pointer to a string +** that is the name of the appropriate collation sequence to use for text +** comparisons on the constraint identified by its arguments. ** -** The first argument must be the sqlite3_index_info object that is the -** first parameter to the xBestIndex() method. The second argument must be -** an index into the aConstraint[] array belonging to the sqlite3_index_info -** structure passed to xBestIndex. This function returns a pointer to a buffer -** containing the name of the collation sequence for the corresponding -** constraint. +** The first argument must be the pointer to the [sqlite3_index_info] object +** that is the first parameter to the xBestIndex() method. The second argument +** must be an index into the aConstraint[] array belonging to the +** sqlite3_index_info structure passed to xBestIndex. +** +** Important: +** The first parameter must be the same pointer that is passed into the +** xBestMethod() method. The first parameter may not be a pointer to a +** different [sqlite3_index_info] object, even an exact copy. +** +** The return value is computed as follows: +** +**
    +**
  1. If the constraint comes from a WHERE clause expression that contains +** a [COLLATE operator], then the name of the collation specified by +** that COLLATE operator is returned. +**

  2. If there is no COLLATE operator, but the column that is the subject +** of the constraint specifies an alternative collating sequence via +** a [COLLATE clause] on the column definition within the CREATE TABLE +** statement that was passed into [sqlite3_declare_vtab()], then the +** name of that alternative collating sequence is returned. +**

  3. Otherwise, "BINARY" is returned. +**

*/ SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +/* +** CAPI3REF: Determine if a virtual table query is DISTINCT +** METHOD: sqlite3_index_info +** +** This API may only be used from within an [xBestIndex|xBestIndex method] +** of a [virtual table] implementation. The result of calling this +** interface from outside of xBestIndex() is undefined and probably harmful. +** +** ^The sqlite3_vtab_distinct() interface returns an integer that is +** either 0, 1, or 2. The integer returned by sqlite3_vtab_distinct() +** gives the virtual table additional information about how the query +** planner wants the output to be ordered. As long as the virtual table +** can meet the ordering requirements of the query planner, it may set +** the "orderByConsumed" flag. +** +**
  1. +** ^If the sqlite3_vtab_distinct() interface returns 0, that means +** that the query planner needs the virtual table to return all rows in the +** sort order defined by the "nOrderBy" and "aOrderBy" fields of the +** [sqlite3_index_info] object. This is the default expectation. If the +** virtual table outputs all rows in sorted order, then it is always safe for +** the xBestIndex method to set the "orderByConsumed" flag, regardless of +** the return value from sqlite3_vtab_distinct(). +**

  2. +** ^(If the sqlite3_vtab_distinct() interface returns 1, that means +** that the query planner does not need the rows to be returned in sorted order +** as long as all rows with the same values in all columns identified by the +** "aOrderBy" field are adjacent.)^ This mode is used when the query planner +** is doing a GROUP BY. +**

  3. +** ^(If the sqlite3_vtab_distinct() interface returns 2, that means +** that the query planner does not need the rows returned in any particular +** order, as long as rows with the same values in all "aOrderBy" columns +** are adjacent.)^ ^(Furthermore, only a single row for each particular +** combination of values in the columns identified by the "aOrderBy" field +** needs to be returned.)^ ^It is always ok for two or more rows with the same +** values in all "aOrderBy" columns to be returned, as long as all such rows +** are adjacent. ^The virtual table may, if it chooses, omit extra rows +** that have the same value for all columns identified by "aOrderBy". +** ^However omitting the extra rows is optional. +** This mode is used for a DISTINCT query. +**

+** +** ^For the purposes of comparing virtual table output values to see if the +** values are same value for sorting purposes, two NULL values are considered +** to be the same. In other words, the comparison operator is "IS" +** (or "IS NOT DISTINCT FROM") and not "==". +** +** If a virtual table implementation is unable to meet the requirements +** specified above, then it must not set the "orderByConsumed" flag in the +** [sqlite3_index_info] object or an incorrect answer may result. +** +** ^A virtual table implementation is always free to return rows in any order +** it wants, as long as the "orderByConsumed" flag is not set. ^When the +** the "orderByConsumed" flag is unset, the query planner will add extra +** [bytecode] to ensure that the final results returned by the SQL query are +** ordered correctly. The use of the "orderByConsumed" flag and the +** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful +** use of the sqlite3_vtab_distinct() interface and the "orderByConsumed" +** flag might help queries against a virtual table to run faster. Being +** overly aggressive and setting the "orderByConsumed" flag when it is not +** valid to do so, on the other hand, might cause SQLite to return incorrect +** results. +*/ +SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*); + +/* +** CAPI3REF: Identify and handle IN constraints in xBestIndex +** +** This interface may only be used from within an +** [xBestIndex|xBestIndex() method] of a [virtual table] implementation. +** The result of invoking this interface from any other context is +** undefined and probably harmful. +** +** ^(A constraint on a virtual table of the form +** "[IN operator|column IN (...)]" is +** communicated to the xBestIndex method as a +** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use +** this constraint, it must set the corresponding +** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** the usual mode of handling IN operators, SQLite generates [bytecode] +** that invokes the [xFilter|xFilter() method] once for each value +** on the right-hand side of the IN operator.)^ Thus the virtual table +** only sees a single value from the right-hand side of the IN operator +** at a time. +** +** In some cases, however, it would be advantageous for the virtual +** table to see all values on the right-hand of the IN operator all at +** once. The sqlite3_vtab_in() interfaces facilitates this in two ways: +** +**
    +**
  1. +** ^A call to sqlite3_vtab_in(P,N,-1) will return true (non-zero) +** if and only if the [sqlite3_index_info|P->aConstraint][N] constraint +** is an [IN operator] that can be processed all at once. ^In other words, +** sqlite3_vtab_in() with -1 in the third argument is a mechanism +** by which the virtual table can ask SQLite if all-at-once processing +** of the IN operator is even possible. +** +**

  2. +** ^A call to sqlite3_vtab_in(P,N,F) with F==1 or F==0 indicates +** to SQLite that the virtual table does or does not want to process +** the IN operator all-at-once, respectively. ^Thus when the third +** parameter (F) is non-negative, this interface is the mechanism by +** which the virtual table tells SQLite how it wants to process the +** IN operator. +**

+** +** ^The sqlite3_vtab_in(P,N,F) interface can be invoked multiple times +** within the same xBestIndex method call. ^For any given P,N pair, +** the return value from sqlite3_vtab_in(P,N,F) will always be the same +** within the same xBestIndex call. ^If the interface returns true +** (non-zero), that means that the constraint is an IN operator +** that can be processed all-at-once. ^If the constraint is not an IN +** operator or cannot be processed all-at-once, then the interface returns +** false. +** +** ^(All-at-once processing of the IN operator is selected if both of the +** following conditions are met: +** +**
    +**
  1. The P->aConstraintUsage[N].argvIndex value is set to a positive +** integer. This is how the virtual table tells SQLite that it wants to +** use the N-th constraint. +** +**

  2. The last call to sqlite3_vtab_in(P,N,F) for which F was +** non-negative had F>=1. +**

)^ +** +** ^If either or both of the conditions above are false, then SQLite uses +** the traditional one-at-a-time processing strategy for the IN constraint. +** ^If both conditions are true, then the argvIndex-th parameter to the +** xFilter method will be an [sqlite3_value] that appears to be NULL, +** but which can be passed to [sqlite3_vtab_in_first()] and +** [sqlite3_vtab_in_next()] to find all values on the right-hand side +** of the IN constraint. +*/ +SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); + +/* +** CAPI3REF: Find all elements on the right-hand side of an IN constraint. +** +** These interfaces are only useful from within the +** [xFilter|xFilter() method] of a [virtual table] implementation. +** The result of invoking these interfaces from any other context +** is undefined and probably harmful. +** +** The X parameter in a call to sqlite3_vtab_in_first(X,P) or +** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** xFilter method which invokes these routines, and specifically +** a parameter that was previously selected for all-at-once IN constraint +** processing use the [sqlite3_vtab_in()] interface in the +** [xBestIndex|xBestIndex method]. ^(If the X parameter is not +** an xFilter argument that was selected for all-at-once IN constraint +** processing, then these routines return [SQLITE_MISUSE])^ or perhaps +** exhibit some other undefined or harmful behavior. +** +** ^(Use these routines to access all values on the right-hand side +** of the IN constraint using code like the following: +** +**
+**    for(rc=sqlite3_vtab_in_first(pList, &pVal);
+**        rc==SQLITE_OK && pVal;
+**        rc=sqlite3_vtab_in_next(pList, &pVal)
+**    ){
+**      // do something with pVal
+**    }
+**    if( rc!=SQLITE_OK ){
+**      // an error has occurred
+**    }
+** 
+** )^
+**
+** ^On success, the sqlite3_vtab_in_first(X,P) and sqlite3_vtab_in_next(X,P)
+** routines return SQLITE_OK and set *P to point to the first or next value
+** on the RHS of the IN constraint. ^If there are no more values on the
+** right hand side of the IN constraint, then *P is set to NULL and these
+** routines return [SQLITE_DONE]. ^The return value might be
+** some other value, such as SQLITE_NOMEM, in the event of a malfunction.
+**
+** The *ppOut values returned by these routines are only valid until the
+** next call to either of these routines or until the end of the xFilter
+** method from which these routines were called. If the virtual table
+** implementation needs to retain the *ppOut values for longer, it must make
+** copies. The *ppOut values are [protected sqlite3_value|protected].
+*/
+SQLITE_API int sqlite3_vtab_in_first(sqlite3_value *pVal, sqlite3_value **ppOut);
+SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut);
+
+/*
+** CAPI3REF: Constraint values in xBestIndex()
+** METHOD: sqlite3_index_info
+**
+** This API may only be used from within the [xBestIndex|xBestIndex method]
+** of a [virtual table] implementation. The result of calling this interface
+** from outside of an xBestIndex method is undefined and probably harmful.
+**
+** ^When the sqlite3_vtab_rhs_value(P,J,V) interface is invoked from within
+** the [xBestIndex] method of a [virtual table] implementation, with P being
+** a copy of the [sqlite3_index_info] object pointer passed into xBestIndex and
+** J being a 0-based index into P->aConstraint[], then this routine
+** attempts to set *V to the value of the right-hand operand of
+** that constraint if the right-hand operand is known. ^If the
+** right-hand operand is not known, then *V is set to a NULL pointer.
+** ^The sqlite3_vtab_rhs_value(P,J,V) interface returns SQLITE_OK if
+** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V)
+** interface returns SQLITE_NOTFOUND if the right-hand side of the J-th
+** constraint is not available. ^The sqlite3_vtab_rhs_value() interface
+** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if
+** something goes wrong.
+**
+** The sqlite3_vtab_rhs_value() interface is usually only successful if
+** the right-hand operand of a constraint is a literal value in the original
+** SQL statement. If the right-hand operand is an expression or a reference
+** to some other column or a [host parameter], then sqlite3_vtab_rhs_value()
+** will probably return [SQLITE_NOTFOUND].
+**
+** ^(Some constraints, such as [SQLITE_INDEX_CONSTRAINT_ISNULL] and
+** [SQLITE_INDEX_CONSTRAINT_ISNOTNULL], have no right-hand operand. For such
+** constraints, sqlite3_vtab_rhs_value() always returns SQLITE_NOTFOUND.)^
+**
+** ^The [sqlite3_value] object returned in *V is a protected sqlite3_value
+** and remains valid for the duration of the xBestIndex method call.
+** ^When xBestIndex returns, the sqlite3_value object returned by
+** sqlite3_vtab_rhs_value() is automatically deallocated.
+**
+** The "_rhs_" in the name of this routine is an abbreviation for
+** "Right-Hand Side".
+*/
+SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **ppVal);
+
 /*
 ** CAPI3REF: Conflict resolution modes
 ** KEYWORDS: {conflict resolution mode}
@@ -9879,6 +10328,10 @@ SQLITE_API unsigned char *sqlite3_serialize(
 ** database is currently in a read transaction or is involved in a backup
 ** operation.
** +** It is not possible to deserialized into the TEMP database. If the +** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the +** function returns SQLITE_ERROR. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning. diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index 5ac9570..e037857 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build cgo // +build cgo package sqlite3 @@ -21,8 +22,10 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT #cgo CFLAGS: -Wno-deprecated-declarations #cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1 +#cgo openbsd CFLAGS: -I/usr/local/include +#cgo openbsd LDFLAGS: -L/usr/local/lib #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif @@ -231,8 +234,14 @@ const ( columnTimestamp string = "timestamp" ) +// This variable can be replaced with -ldflags like below: +// go build -ldflags="-X 'github.com/mattn/go-sqlite3.driverName=my-sqlite3'" +var driverName = "sqlite3" + func init() { - sql.Register("sqlite3", &SQLiteDriver{}) + if driverName != "" { + sql.Register(driverName, &SQLiteDriver{}) + } } // Version returns SQLite library version information. @@ -288,6 +297,51 @@ const ( /*SQLITE_RECURSIVE = C.SQLITE_RECURSIVE*/ ) +// Standard File Control Opcodes +// See: https://www.sqlite.org/c3ref/c_fcntl_begin_atomic_write.html +const ( + SQLITE_FCNTL_LOCKSTATE = int(1) + SQLITE_FCNTL_GET_LOCKPROXYFILE = int(2) + SQLITE_FCNTL_SET_LOCKPROXYFILE = int(3) + SQLITE_FCNTL_LAST_ERRNO = int(4) + SQLITE_FCNTL_SIZE_HINT = int(5) + SQLITE_FCNTL_CHUNK_SIZE = int(6) + SQLITE_FCNTL_FILE_POINTER = int(7) + SQLITE_FCNTL_SYNC_OMITTED = int(8) + SQLITE_FCNTL_WIN32_AV_RETRY = int(9) + SQLITE_FCNTL_PERSIST_WAL = int(10) + SQLITE_FCNTL_OVERWRITE = int(11) + SQLITE_FCNTL_VFSNAME = int(12) + SQLITE_FCNTL_POWERSAFE_OVERWRITE = int(13) + SQLITE_FCNTL_PRAGMA = int(14) + SQLITE_FCNTL_BUSYHANDLER = int(15) + SQLITE_FCNTL_TEMPFILENAME = int(16) + SQLITE_FCNTL_MMAP_SIZE = int(18) + SQLITE_FCNTL_TRACE = int(19) + SQLITE_FCNTL_HAS_MOVED = int(20) + SQLITE_FCNTL_SYNC = int(21) + SQLITE_FCNTL_COMMIT_PHASETWO = int(22) + SQLITE_FCNTL_WIN32_SET_HANDLE = int(23) + SQLITE_FCNTL_WAL_BLOCK = int(24) + SQLITE_FCNTL_ZIPVFS = int(25) + SQLITE_FCNTL_RBU = int(26) + SQLITE_FCNTL_VFS_POINTER = int(27) + SQLITE_FCNTL_JOURNAL_POINTER = int(28) + SQLITE_FCNTL_WIN32_GET_HANDLE = int(29) + SQLITE_FCNTL_PDB = int(30) + SQLITE_FCNTL_BEGIN_ATOMIC_WRITE = int(31) + SQLITE_FCNTL_COMMIT_ATOMIC_WRITE = int(32) + SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE = int(33) + SQLITE_FCNTL_LOCK_TIMEOUT = int(34) + SQLITE_FCNTL_DATA_VERSION = int(35) + SQLITE_FCNTL_SIZE_LIMIT = int(36) + SQLITE_FCNTL_CKPT_DONE = int(37) + SQLITE_FCNTL_RESERVE_BYTES = int(38) + SQLITE_FCNTL_CKPT_START = int(39) + SQLITE_FCNTL_EXTERNAL_READER = int(40) + SQLITE_FCNTL_CKSM_FILE = int(41) +) + // SQLiteDriver implements driver.Driver. 
type SQLiteDriver struct { Extensions []string @@ -828,6 +882,10 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []namedValue) tail := s.(*SQLiteStmt).t s.Close() if tail == "" { + if res == nil { + // https://github.com/mattn/go-sqlite3/issues/963 + res = &SQLiteResult{0, 0} + } return res, nil } query = tail @@ -1409,12 +1467,6 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { return nil, errors.New("sqlite succeeded without returning a database") } - rv = C.sqlite3_busy_timeout(db, C.int(busyTimeout)) - if rv != C.SQLITE_OK { - C.sqlite3_close_v2(db) - return nil, Error{Code: ErrNo(rv)} - } - exec := func(s string) error { cs := C.CString(s) rv := C.sqlite3_exec(db, cs, nil, nil, nil) @@ -1425,6 +1477,12 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { return nil } + // Busy timeout + if err := exec(fmt.Sprintf("PRAGMA busy_timeout = %d;", busyTimeout)); err != nil { + C.sqlite3_close_v2(db) + return nil, err + } + // USER AUTHENTICATION // // User Authentication is always performed even when @@ -1800,6 +1858,31 @@ func (c *SQLiteConn) SetLimit(id int, newVal int) int { return int(C._sqlite3_limit(c.db, C.int(id), C.int(newVal))) } +// SetFileControlInt invokes the xFileControl method on a given database. The +// dbName is the name of the database. It will default to "main" if left blank. +// The op is one of the opcodes prefixed by "SQLITE_FCNTL_". The arg argument +// and return code are both opcode-specific. Please see the SQLite documentation. +// +// This method is not thread-safe as the returned error code can be changed by +// another call if invoked concurrently. +// +// See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html +func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { + if dbName == "" { + dbName = "main" + } + + cDBName := C.CString(dbName) + defer C.free(unsafe.Pointer(cDBName)) + + cArg := C.int(arg) + rv := C.sqlite3_file_control(c.db, cDBName, C.int(op), unsafe.Pointer(&cArg)) + if rv != C.SQLITE_OK { + return c.lastError() + } + return nil +} + // Close the statement. func (s *SQLiteStmt) Close() error { s.mu.Lock() diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go index 90800fe..7c7431d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go @@ -8,7 +8,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go index f241819..9433fea 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go @@ -9,7 +9,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_json1.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_json1.go deleted file mode 100644 index 7cfce76..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_json1.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2019 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -// +build sqlite_json sqlite_json1 json1 - -package sqlite3 - -/* -#cgo CFLAGS: -DSQLITE_ENABLE_JSON1 -*/ -import "C" diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go index db7a666..b43e482 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go @@ -13,7 +13,7 @@ package sqlite3 #cgo LDFLAGS: -lm #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif @@ -33,7 +33,7 @@ import ( // The callback is passed a SQLitePreUpdateData struct with the data for // the update, as well as methods for fetching copies of impacted data. // -// If there is an existing update hook for this connection, it will be +// If there is an existing preupdate hook for this connection, it will be // removed. If callback is nil the existing hook (if any) will be removed // without creating a new one. func (c *SQLiteConn) RegisterPreUpdateHook(callback func(SQLitePreUpdateData)) { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go index 8df453d..c510a15 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go @@ -13,7 +13,7 @@ package sqlite3 // The callback is passed a SQLitePreUpdateData struct with the data for // the update, as well as methods for fetching copies of impacted data. // -// If there is an existing update hook for this connection, it will be +// If there is an existing preupdate hook for this connection, it will be // removed. If callback is nil the existing hook (if any) will be removed // without creating a new one. 
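+//
+// A minimal usage sketch, not part of the upstream file: it assumes a build
+// with the sqlite_preupdate_hook tag, uses the driver's ConnectHook field,
+// and assumes the DatabaseName/TableName fields on SQLitePreUpdateData as
+// provided by this package; the driver name and log call are illustrative.
+//
+//	sql.Register("sqlite3_with_hook", &SQLiteDriver{
+//		ConnectHook: func(conn *SQLiteConn) error {
+//			conn.RegisterPreUpdateHook(func(d SQLitePreUpdateData) {
+//				log.Printf("preupdate on %s.%s", d.DatabaseName, d.TableName)
+//			})
+//			return nil
+//		},
+//	})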
func (c *SQLiteConn) RegisterPreUpdateHook(callback func(SQLitePreUpdateData)) { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c index 1af1726..fc37b33 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c @@ -5,7 +5,7 @@ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY #include -#include +#include "sqlite3-binding.h" extern int unlock_notify_wait(sqlite3 *db); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index 43f53e8..adfa26c 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -12,7 +12,7 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY #include -#include +#include "sqlite3-binding.h" extern void unlock_notify_callback(void *arg, int argc); */ diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index 94203b3..b62b608 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -11,7 +11,7 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_USER_AUTHENTICATION #cgo LDFLAGS: -lm #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go index 8fd6cdf..4a93c46 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go @@ -19,7 +19,7 @@ package sqlite3 #cgo CFLAGS: -Wno-deprecated-declarations #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif @@ -472,10 +472,21 @@ func goVBestIndex(pVTab unsafe.Pointer, icp unsafe.Pointer) *C.char { } info.idxNum = C.int(res.IdxNum) - idxStr := C.CString(res.IdxStr) - defer C.free(unsafe.Pointer(idxStr)) - info.idxStr = idxStr - info.needToFreeIdxStr = C.int(0) + info.idxStr = (*C.char)(C.sqlite3_malloc(C.int(len(res.IdxStr) + 1))) + if info.idxStr == nil { + // C.malloc and C.CString ordinarily do this for you. See https://golang.org/cmd/cgo/ + panic("out of memory") + } + info.needToFreeIdxStr = C.int(1) + + idxStr := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(info.idxStr)), + Len: len(res.IdxStr) + 1, + Cap: len(res.IdxStr) + 1, + })) + copy(idxStr, res.IdxStr) + idxStr[len(idxStr)-1] = 0 // null-terminated string + if res.AlreadyOrdered { info.orderByConsumed = C.int(1) } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go index 4c8d992..56bb914 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go @@ -9,7 +9,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go index b4128db..0fd8210 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go @@ -1,5 +1,4 @@ // Copyright (C) 2019 Yasuhiro Matsumoto . 
-// // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. @@ -7,15 +6,16 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif */ import "C" import ( + "database/sql" "reflect" - "time" + "strings" ) // ColumnTypeDatabaseTypeName implement RowsColumnTypeDatabaseTypeName. @@ -31,32 +31,78 @@ func (rc *SQLiteRows) ColumnTypeLength(index int) (length int64, ok bool) { func (rc *SQLiteRows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { return 0, 0, false } +*/ // ColumnTypeNullable implement RowsColumnTypeNullable. func (rc *SQLiteRows) ColumnTypeNullable(i int) (nullable, ok bool) { - return false, false + return true, true } -*/ // ColumnTypeScanType implement RowsColumnTypeScanType. func (rc *SQLiteRows) ColumnTypeScanType(i int) reflect.Type { - switch C.sqlite3_column_type(rc.s.s, C.int(i)) { - case C.SQLITE_INTEGER: - switch C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))) { - case "timestamp", "datetime", "date": - return reflect.TypeOf(time.Time{}) - case "boolean": - return reflect.TypeOf(false) - } - return reflect.TypeOf(int64(0)) - case C.SQLITE_FLOAT: - return reflect.TypeOf(float64(0)) - case C.SQLITE_BLOB: - return reflect.SliceOf(reflect.TypeOf(byte(0))) - case C.SQLITE_NULL: - return reflect.TypeOf(nil) - case C.SQLITE_TEXT: - return reflect.TypeOf("") - } - return reflect.SliceOf(reflect.TypeOf(byte(0))) + //ct := C.sqlite3_column_type(rc.s.s, C.int(i)) // Always returns 5 + return scanType(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) +} + +const ( + SQLITE_INTEGER = iota + SQLITE_TEXT + SQLITE_BLOB + SQLITE_REAL + SQLITE_NUMERIC + SQLITE_TIME + SQLITE_BOOL + SQLITE_NULL +) + +func scanType(cdt string) reflect.Type { + t := strings.ToUpper(cdt) + i := databaseTypeConvSqlite(t) + switch i { + case SQLITE_INTEGER: + return reflect.TypeOf(sql.NullInt64{}) + case SQLITE_TEXT: + return reflect.TypeOf(sql.NullString{}) + case SQLITE_BLOB: + return reflect.TypeOf(sql.RawBytes{}) + case SQLITE_REAL: + return reflect.TypeOf(sql.NullFloat64{}) + case SQLITE_NUMERIC: + return reflect.TypeOf(sql.NullFloat64{}) + case SQLITE_BOOL: + return reflect.TypeOf(sql.NullBool{}) + case SQLITE_TIME: + return reflect.TypeOf(sql.NullTime{}) + } + return reflect.TypeOf(new(interface{})) +} + +func databaseTypeConvSqlite(t string) int { + if strings.Contains(t, "INT") { + return SQLITE_INTEGER + } + if t == "CLOB" || t == "TEXT" || + strings.Contains(t, "CHAR") { + return SQLITE_TEXT + } + if t == "BLOB" { + return SQLITE_BLOB + } + if t == "REAL" || t == "FLOAT" || + strings.Contains(t, "DOUBLE") { + return SQLITE_REAL + } + if t == "DATE" || t == "DATETIME" || + t == "TIMESTAMP" { + return SQLITE_TIME + } + if t == "NUMERIC" || + strings.Contains(t, "DECIMAL") { + return SQLITE_NUMERIC + } + if t == "BOOLEAN" { + return SQLITE_BOOL + } + + return SQLITE_NULL } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index ba6d12f..301cba6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -342,6 +342,20 @@ struct sqlite3_api_routines { sqlite3_file *(*database_file_object)(const char*); /* Version 3.34.0 and later */ int (*txn_state)(sqlite3*,const char*); + /* Version 3.36.1 and later */ + sqlite3_int64 (*changes64)(sqlite3*); + sqlite3_int64 (*total_changes64)(sqlite3*); + /* Version 3.37.0 and later */ + int 
(*autovacuum_pages)(sqlite3*, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, void(*)(void*)); + /* Version 3.38.0 and later */ + int (*error_offset)(sqlite3*); + int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**); + int (*vtab_distinct)(sqlite3_index_info*); + int (*vtab_in)(sqlite3_index_info*,int,int); + int (*vtab_in_first)(sqlite3_value*,sqlite3_value**); + int (*vtab_in_next)(sqlite3_value*,sqlite3_value**); }; /* @@ -648,6 +662,18 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_database_file_object sqlite3_api->database_file_object /* Version 3.34.0 and later */ #define sqlite3_txn_state sqlite3_api->txn_state +/* Version 3.36.1 and later */ +#define sqlite3_changes64 sqlite3_api->changes64 +#define sqlite3_total_changes64 sqlite3_api->total_changes64 +/* Version 3.37.0 and later */ +#define sqlite3_autovacuum_pages sqlite3_api->autovacuum_pages +/* Version 3.38.0 and later */ +#define sqlite3_error_offset sqlite3_api->error_offset +#define sqlite3_vtab_rhs_value sqlite3_api->vtab_rhs_value +#define sqlite3_vtab_distinct sqlite3_api->vtab_distinct +#define sqlite3_vtab_in sqlite3_api->vtab_in +#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first +#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) diff --git a/vendor/github.com/paulmach/orb/CHANGELOG.md b/vendor/github.com/paulmach/orb/CHANGELOG.md new file mode 100644 index 0000000..1783940 --- /dev/null +++ b/vendor/github.com/paulmach/orb/CHANGELOG.md @@ -0,0 +1,176 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +## [v0.11.1](https://github.com/paulmach/orb/compare/v0.11.0...v0.11.1) - 2024-01-29 + +### Fixed + +- geojson: `null` json into non-pointer Feature/FeatureCollection will set them to empty by [@paulmach](https://github.com/paulmach)in https://github.com/paulmach/orb/pull/145 + +## [v0.11.0](https://github.com/paulmach/orb/compare/v0.10.0...v0.11.0) - 2024-01-11 + +### Fixed + +- quadtree: InBoundMatching does not properly accept passed-in buffer by [@nirmal-vuppuluri](https://github.com/nirmal-vuppuluri) in https://github.com/paulmach/orb/pull/139 +- mvt: Do not swallow error cause by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/137 + +### Changed + +- simplify: Visvalingam, by default, keeps 3 points for "areas" by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/140 +- encoding/mvt: skip encoding of features will nil geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/141 +- encoding/wkt: improve unmarshalling performance by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/142 + +## [v0.10.0](https://github.com/paulmach/orb/compare/v0.9.2...v0.10.0) - 2023-07-16 + +### Added + +- add ChildrenInZoomRange method to maptile.Tile by [@peitili](https://github.com/peitili) in https://github.com/paulmach/orb/pull/133 + +## [v0.9.2](https://github.com/paulmach/orb/compare/v0.9.1...v0.9.2) - 2023-05-04 + +### Fixed + +- encoding/wkt: better handling/validation of missing parens by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/131 + +## [v0.9.1](https://github.com/paulmach/orb/compare/v0.9.0...v0.9.1) - 2023-04-26 + +### Fixed + +- Bump up mongo driver to 1.11.4 by [@m-pavel](https://github.com/m-pavel) in 
https://github.com/paulmach/orb/pull/129 +- encoding/wkt: split strings with regexp by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/128 + +## [v0.9.0](https://github.com/paulmach/orb/compare/v0.8.0...v0.9.0) - 2023-02-19 + +### Added + +- geojson: marshal/unmarshal BSON [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/123 + +## [v0.8.0](https://github.com/paulmach/orb/compare/v0.7.1...v0.8.0) - 2023-01-05 + +### Fixed + +- quadtree: fix bad sort due to pointer allocation issue by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/115 +- geojson: ensure geometry unmarshal errors get returned by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/117 +- encoding/mvt: remove use of crypto/md5 to compare marshalling in tests by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/118 +- encoding/wkt: fix panic for some invalid wkt data by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/119 + +### Other + +- fix typo by [@rubenpoppe](https://github.com/rubenpoppe) in https://github.com/paulmach/orb/pull/107 +- Fixed a small twister in README.md by [@Timahawk](https://github.com/Timahawk) in https://github.com/paulmach/orb/pull/108 +- update github ci to use go 1.19 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/116 + +## [v0.7.1](https://github.com/paulmach/orb/compare/v0.7.0...v0.7.1) - 2022-05-16 + +No changes + +The v0.7.0 tag was updated since it initially pointed to the wrong commit. This is causing caching issues. + +## [v0.7.0](https://github.com/paulmach/orb/compare/v0.6.0...v0.7.0) - 2022-05-10 + +This tag is broken, please use v0.7.1 instead. + +### Breaking Changes + +- tilecover now returns an error (vs. panicing) on non-closed 2d geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/87 + + This changes the signature of many of the methods in the [maptile/tilecover](https://github.com/paulmach/orb/tree/master/maptile/tilecover) package. 
+ To emulate the old behavior replace: + + tiles := tilecover.Geometry(poly, zoom) + + with + + tiles, err := tilecover.Geometry(poly, zoom) + if err != nil { + panic(err) + } + +## [v0.6.0](https://github.com/paulmach/orb/compare/v0.5.0...v0.6.0) - 2022-05-04 + +### Added + +- geo: add correctly spelled LengthHaversine by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/97 +- geojson: add support for "external" json encoders/decoders by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/98 +- Add ewkb encoding/decoding support by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/88 + +## [v0.5.0](https://github.com/paulmach/orb/compare/v0.4.0...v0.5.0) - 2022-04-06 + +### Added + +- encoding/mvt: stable marshalling by [@travisgrigsby](https://github.com/travisgrigsby) in https://github.com/paulmach/orb/pull/93 +- encoding/mvt: support mvt marshal for GeometryCollection by [@dadadamarine](https://github.com/dadadamarine) in https://github.com/paulmach/orb/pull/89 + +### Fixed + +- quadtree: fix cleanup of nodes during removal by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/94 + +### Other + +- encoding/wkt: various code improvements by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/95 +- update protoscan to 0.2.1 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/83 + +## [v0.4.0](https://github.com/paulmach/orb/compare/v0.3.0...v0.4.0) - 2021-11-11 + +### Added + +- geo: Add functions to calculate points based on distance and bearing by [@thzinc](https://github.com/thzinc) in https://github.com/paulmach/orb/pull/76 + +### Fixed + +- encoding/mvt: avoid reflect nil value by [@nicklasaven](https://github.com/nicklasaven) in https://github.com/paulmach/orb/pull/78 + +## [v0.3.0](https://github.com/paulmach/orb/compare/v0.2.2...v0.3.0) - 2021-10-16 + +### Changed + +- quadtree: sort KNearest results closest first by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/75 +- ring: require >=4 points to return true when calling Closed() by [@missinglink](https://github.com/missinglink) in https://github.com/paulmach/orb/pull/70 + +### Fixed + +- encoding/mvt: verify tile coord does not overflow for z > 20 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/74 +- quadtree: Address panic-ing quadtree.Matching(…) method when finding no closest node by [@willsalz](https://github.com/willsalz) in https://github.com/paulmach/orb/pull/73 + +## [v0.2.2](https://github.com/paulmach/orb/compare/v0.2.1...v0.2.2) - 2021-06-05 + +### Fixed + +- Dependency resolution problems in some cases, issue https://github.com/paulmach/orb/issues/65, pr https://github.com/paulmach/orb/pull/66 + +## [v0.2.1](https://github.com/paulmach/orb/compare/v0.2.0...v0.2.1) - 2021-01-16 + +### Changed + +- encoding/mvt: upgrade protoscan v0.1 -> v0.2 [`ad31566`](https://github.com/paulmach/orb/commit/ad31566942027c1cd30dd341f35123fb54676599) +- encoding/mvt: remove github.com/pkg/errors as a dependency [`d2e235`](https://github.com/paulmach/orb/commit/d2e23529a295a0d973cc787ad2742cb6ccbd5306) + +## v0.2.0 - 2021-01-16 + +### Breaking Changes + +- Foreign Members in Feature Collections + + Extra attributes in a feature collection object will now be put into `featureCollection.ExtraMembers`. + Similarly, stuff in `ExtraMembers will be marshalled into the feature collection base. 
+ The break happens if you were decoding these foreign members using something like + + ```go + type MyFeatureCollection struct { + geojson.FeatureCollection + Title string `json:"title"` + } + ``` + + **The above will no longer work** in this release and it never supported marshalling. See https://github.com/paulmach/orb/pull/56 for more details. + +- Features with nil/missing geometry will no longer return an errors + + Previously missing or invalid geometry in a feature collection would return a `ErrInvalidGeometry` error. + However missing geometry is compliant with [section 3.2](https://tools.ietf.org/html/rfc7946#section-3.2) of the spec. + See https://github.com/paulmach/orb/issues/38 and https://github.com/paulmach/orb/pull/58 for more details. + +### Changed + +- encoding/mvt: faster unmarshalling for Mapbox Vector Tiles (MVT) see https://github.com/paulmach/orb/pull/57 diff --git a/vendor/github.com/paulmach/orb/LICENSE.md b/vendor/github.com/paulmach/orb/LICENSE.md new file mode 100644 index 0000000..526962a --- /dev/null +++ b/vendor/github.com/paulmach/orb/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Paul Mach + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/paulmach/orb/README.md b/vendor/github.com/paulmach/orb/README.md new file mode 100644 index 0000000..03fe463 --- /dev/null +++ b/vendor/github.com/paulmach/orb/README.md @@ -0,0 +1,181 @@ +# orb [![CI](https://github.com/paulmach/orb/workflows/CI/badge.svg)](https://github.com/paulmach/orb/actions?query=workflow%3ACI+event%3Apush) [![codecov](https://codecov.io/gh/paulmach/orb/branch/master/graph/badge.svg?token=NuuTjLVpKW)](https://codecov.io/gh/paulmach/orb) [![Go Report Card](https://goreportcard.com/badge/github.com/paulmach/orb)](https://goreportcard.com/report/github.com/paulmach/orb) [![Go Reference](https://pkg.go.dev/badge/github.com/paulmach/orb.svg)](https://pkg.go.dev/github.com/paulmach/orb) + +Package `orb` defines a set of types for working with 2d geo and planar/projected geometric data in Golang. +There are a set of sub-packages that use these types to do interesting things. +They each provide their own README with extra info. + +## Interesting features + +- **Simple types** - allow for natural operations using the `make`, `append`, `len`, `[s:e]` builtins. +- **GeoJSON** - support as part of the [`geojson`](geojson) sub-package. +- **Mapbox Vector Tile** - encoding and decoding as part of the [`encoding/mvt`](encoding/mvt) sub-package. 
+- **Direct to type from DB query results** - by scanning WKB data directly into types. +- **Rich set of sub-packages** - including [`clipping`](clip), [`simplifing`](simplify), [`quadtree`](quadtree) and more. + +## Type definitions + +```go +type Point [2]float64 +type MultiPoint []Point + +type LineString []Point +type MultiLineString []LineString + +type Ring LineString +type Polygon []Ring +type MultiPolygon []Polygon + +type Collection []Geometry + +type Bound struct { Min, Max Point } +``` + +Defining the types as slices allows them to be accessed in an idiomatic way +using Go's built-in functions such at `make`, `append`, `len` +and with slice notation like `[s:e]`. For example: + +```go +ls := make(orb.LineString, 0, 100) +ls = append(ls, orb.Point{1, 1}) +point := ls[0] +``` + +### Shared `Geometry` interface + +All of the base types implement the `orb.Geometry` interface defined as: + +```go +type Geometry interface { + GeoJSONType() string + Dimensions() int // e.g. 0d, 1d, 2d + Bound() Bound +} +``` + +This interface is accepted by functions in the sub-packages which then act on the +base types correctly. For example: + +```go +l := clip.Geometry(bound, geom) +``` + +will use the appropriate clipping algorithm depending on if the input is 1d or 2d, +e.g. a `orb.LineString` or a `orb.Polygon`. + +Only a few methods are defined directly on these type, for example `Clone`, `Equal`, `GeoJSONType`. +Other operation that depend on geo vs. planar contexts are defined in the respective sub-package. +For example: + +- Computing the geo distance between two point: + + ```go + p1 := orb.Point{-72.796408, -45.407131} + p2 := orb.Point{-72.688541, -45.384987} + + geo.Distance(p1, p2) + ``` + +- Compute the planar area and centroid of a polygon: + + ```go + poly := orb.Polygon{...} + centroid, area := planar.CentroidArea(poly) + ``` + +## GeoJSON + +The [geojson](geojson) sub-package implements Marshalling and Unmarshalling of GeoJSON data. +Features are defined as: + +```go +type Feature struct { + ID interface{} `json:"id,omitempty"` + Type string `json:"type"` + Geometry orb.Geometry `json:"geometry"` + Properties Properties `json:"properties"` +} +``` + +Defining the geometry as an `orb.Geometry` interface along with sub-package functions +accepting geometries allows them to work together to create easy to follow code. +For example, clipping all the geometries in a collection: + +```go +fc, err := geojson.UnmarshalFeatureCollection(data) +for _, f := range fc { + f.Geometry = clip.Geometry(bound, f.Geometry) +} +``` + +The library supports third party "encoding/json" replacements +such [github.com/json-iterator/go](https://github.com/json-iterator/go). +See the [geojson](geojson) readme for more details. + +The types also support BSON so they can be used directly when working with MongoDB. + +## Mapbox Vector Tiles + +The [encoding/mvt](encoding/mvt) sub-package implements Marshalling and +Unmarshalling [MVT](https://www.mapbox.com/vector-tiles/) data. +This package uses sets of `geojson.FeatureCollection` to define the layers, +keyed by the layer name. For example: + +```go +collections := map[string]*geojson.FeatureCollection{} + +// Convert to a layers object and project to tile coordinates. +layers := mvt.NewLayers(collections) +layers.ProjectToTile(maptile.New(x, y, z)) + +// In order to be used as source for MapboxGL geometries need to be clipped +// to max allowed extent. 
(uncomment next line) +// layers.Clip(mvt.MapboxGLDefaultExtentBound) + +// Simplify the geometry now that it's in tile coordinate space. +layers.Simplify(simplify.DouglasPeucker(1.0)) + +// Depending on use-case remove empty geometry, those too small to be +// represented in this tile space. +// In this case lines shorter than 1, and areas smaller than 2. +layers.RemoveEmpty(1.0, 2.0) + +// encoding using the Mapbox Vector Tile protobuf encoding. +data, err := mvt.Marshal(layers) // this data is NOT gzipped. + +// Sometimes MVT data is stored and transfered gzip compressed. In that case: +data, err := mvt.MarshalGzipped(layers) +``` + +## Decoding WKB/EWKB from a database query + +Geometries are usually returned from databases in WKB or EWKB format. The [encoding/ewkb](encoding/ewkb) +sub-package offers helpers to "scan" the data into the base types directly. +For example: + +```go +db.Exec( + "INSERT INTO postgis_table (point_column) VALUES (ST_GeomFromEWKB(?))", + ewkb.Value(orb.Point{1, 2}, 4326), +) + +row := db.QueryRow("SELECT ST_AsBinary(point_column) FROM postgis_table") + +var p orb.Point +err := row.Scan(ewkb.Scanner(&p)) +``` + +For more information see the readme in the [encoding/ewkb](encoding/ewkb) package. + +## List of sub-package utilities + +- [`clip`](clip) - clipping geometry to a bounding box +- [`encoding/mvt`](encoding/mvt) - encoded and decoding from [Mapbox Vector Tiles](https://www.mapbox.com/vector-tiles/) +- [`encoding/wkb`](encoding/wkb) - well-known binary as well as helpers to decode from the database queries +- [`encoding/ewkb`](encoding/ewkb) - extended well-known binary format that includes the SRID +- [`encoding/wkt`](encoding/wkt) - well-known text encoding +- [`geojson`](geojson) - working with geojson and the types in this package +- [`maptile`](maptile) - working with mercator map tiles and quadkeys +- [`project`](project) - project geometries between geo and planar contexts +- [`quadtree`](quadtree) - quadtree implementation using the types in this package +- [`resample`](resample) - resample points in a line string geometry +- [`simplify`](simplify) - linear geometry simplifications like Douglas-Peucker diff --git a/vendor/github.com/paulmach/orb/bound.go b/vendor/github.com/paulmach/orb/bound.go new file mode 100644 index 0000000..a0726d6 --- /dev/null +++ b/vendor/github.com/paulmach/orb/bound.go @@ -0,0 +1,172 @@ +package orb + +import ( + "math" +) + +var emptyBound = Bound{Min: Point{1, 1}, Max: Point{-1, -1}} + +// A Bound represents a closed box or rectangle. +// To create a bound with two points you can do something like: +// orb.MultiPoint{p1, p2}.Bound() +type Bound struct { + Min, Max Point +} + +// GeoJSONType returns the GeoJSON type for the object. +func (b Bound) GeoJSONType() string { + return "Polygon" +} + +// Dimensions returns 2 because a Bound is a 2d object. +func (b Bound) Dimensions() int { + return 2 +} + +// ToPolygon converts the bound into a Polygon object. +func (b Bound) ToPolygon() Polygon { + return Polygon{b.ToRing()} +} + +// ToRing converts the bound into a loop defined +// by the boundary of the box. +func (b Bound) ToRing() Ring { + return Ring{ + b.Min, + Point{b.Max[0], b.Min[1]}, + b.Max, + Point{b.Min[0], b.Max[1]}, + b.Min, + } +} + +// Extend grows the bound to include the new point. 
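+//
+// A short sketch of how Extend behaves (values are illustrative):
+//
+//	b := Bound{Min: Point{0, 0}, Max: Point{1, 1}}
+//	b = b.Extend(Point{2, 3})
+//	// b is now Bound{Min: Point{0, 0}, Max: Point{2, 3}}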
+func (b Bound) Extend(point Point) Bound { + // already included, no big deal + if b.Contains(point) { + return b + } + + return Bound{ + Min: Point{ + math.Min(b.Min[0], point[0]), + math.Min(b.Min[1], point[1]), + }, + Max: Point{ + math.Max(b.Max[0], point[0]), + math.Max(b.Max[1], point[1]), + }, + } +} + +// Union extends this bound to contain the union of this and the given bound. +func (b Bound) Union(other Bound) Bound { + if other.IsEmpty() { + return b + } + + b = b.Extend(other.Min) + b = b.Extend(other.Max) + b = b.Extend(other.LeftTop()) + b = b.Extend(other.RightBottom()) + + return b +} + +// Contains determines if the point is within the bound. +// Points on the boundary are considered within. +func (b Bound) Contains(point Point) bool { + if point[1] < b.Min[1] || b.Max[1] < point[1] { + return false + } + + if point[0] < b.Min[0] || b.Max[0] < point[0] { + return false + } + + return true +} + +// Intersects determines if two bounds intersect. +// Returns true if they are touching. +func (b Bound) Intersects(bound Bound) bool { + if (b.Max[0] < bound.Min[0]) || + (b.Min[0] > bound.Max[0]) || + (b.Max[1] < bound.Min[1]) || + (b.Min[1] > bound.Max[1]) { + return false + } + + return true +} + +// Pad extends the bound in all directions by the given value. +func (b Bound) Pad(d float64) Bound { + b.Min[0] -= d + b.Min[1] -= d + + b.Max[0] += d + b.Max[1] += d + + return b +} + +// Center returns the center of the bounds by "averaging" the x and y coords. +func (b Bound) Center() Point { + return Point{ + (b.Min[0] + b.Max[0]) / 2.0, + (b.Min[1] + b.Max[1]) / 2.0, + } +} + +// Top returns the top of the bound. +func (b Bound) Top() float64 { + return b.Max[1] +} + +// Bottom returns the bottom of the bound. +func (b Bound) Bottom() float64 { + return b.Min[1] +} + +// Right returns the right of the bound. +func (b Bound) Right() float64 { + return b.Max[0] +} + +// Left returns the left of the bound. +func (b Bound) Left() float64 { + return b.Min[0] +} + +// LeftTop returns the upper left point of the bound. +func (b Bound) LeftTop() Point { + return Point{b.Left(), b.Top()} +} + +// RightBottom return the lower right point of the bound. +func (b Bound) RightBottom() Point { + return Point{b.Right(), b.Bottom()} +} + +// IsEmpty returns true if it contains zero area or if +// it's in some malformed negative state where the left point is larger than the right. +// This can be caused by padding too much negative. +func (b Bound) IsEmpty() bool { + return b.Min[0] > b.Max[0] || b.Min[1] > b.Max[1] +} + +// IsZero return true if the bound just includes just null island. +func (b Bound) IsZero() bool { + return b.Max == Point{} && b.Min == Point{} +} + +// Bound returns the the same bound. +func (b Bound) Bound() Bound { + return b +} + +// Equal returns if two bounds are equal. +func (b Bound) Equal(c Bound) bool { + return b.Min == c.Min && b.Max == c.Max +} diff --git a/vendor/github.com/paulmach/orb/clone.go b/vendor/github.com/paulmach/orb/clone.go new file mode 100644 index 0000000..ac757b7 --- /dev/null +++ b/vendor/github.com/paulmach/orb/clone.go @@ -0,0 +1,56 @@ +package orb + +import ( + "fmt" +) + +// Clone will make a deep copy of the geometry. 
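+//
+// A short sketch (illustrative values); the concrete type is preserved, so a
+// type assertion recovers it and the copy can be mutated independently:
+//
+//	ls := LineString{{0, 0}, {1, 1}}
+//	cp := Clone(ls).(LineString)
+//	cp[0] = Point{5, 5} // ls is unchanged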
+func Clone(g Geometry) Geometry { + if g == nil { + return nil + } + + switch g := g.(type) { + case Point: + return g + case MultiPoint: + if g == nil { + return nil + } + return g.Clone() + case LineString: + if g == nil { + return nil + } + return g.Clone() + case MultiLineString: + if g == nil { + return nil + } + return g.Clone() + case Ring: + if g == nil { + return nil + } + return g.Clone() + case Polygon: + if g == nil { + return nil + } + return g.Clone() + case MultiPolygon: + if g == nil { + return nil + } + return g.Clone() + case Collection: + if g == nil { + return nil + } + return g.Clone() + case Bound: + return g + } + + panic(fmt.Sprintf("geometry type not supported: %T", g)) +} diff --git a/vendor/github.com/paulmach/orb/codecov.yml b/vendor/github.com/paulmach/orb/codecov.yml new file mode 100644 index 0000000..8bf5754 --- /dev/null +++ b/vendor/github.com/paulmach/orb/codecov.yml @@ -0,0 +1,10 @@ +coverage: + status: + project: off + patch: off + + precision: 2 + round: down + range: "70...90" + +comment: false diff --git a/vendor/github.com/paulmach/orb/define.go b/vendor/github.com/paulmach/orb/define.go new file mode 100644 index 0000000..a0b08f3 --- /dev/null +++ b/vendor/github.com/paulmach/orb/define.go @@ -0,0 +1,44 @@ +package orb + +// EarthRadius is the radius of the earth in meters. It is used in geo distance calculations. +// To keep things consistent, this value matches WGS84 Web Mercator (EPSG:3857). +const EarthRadius = 6378137.0 // meters + +// DefaultRoundingFactor is the default rounding factor used by the Round func. +var DefaultRoundingFactor = 1e6 // 6 decimal places + +// Orientation defines the order of the points in a polygon +// or closed ring. +type Orientation int8 + +// Constants to define orientation. +// They follow the right hand rule for orientation. +const ( + // CCW stands for Counter Clock Wise + CCW Orientation = 1 + + // CW stands for Clock Wise + CW Orientation = -1 +) + +// A DistanceFunc is a function that computes the distance between two points. +type DistanceFunc func(Point, Point) float64 + +// A Projection a function that moves a point from one space to another. +type Projection func(Point) Point + +// Pointer is something that can be represented by a point. +type Pointer interface { + Point() Point +} + +// A Simplifier is something that can simplify geometry. +type Simplifier interface { + Simplify(g Geometry) Geometry + LineString(ls LineString) LineString + MultiLineString(mls MultiLineString) MultiLineString + Ring(r Ring) Ring + Polygon(p Polygon) Polygon + MultiPolygon(mp MultiPolygon) MultiPolygon + Collection(c Collection) Collection +} diff --git a/vendor/github.com/paulmach/orb/equal.go b/vendor/github.com/paulmach/orb/equal.go new file mode 100644 index 0000000..7cc87dd --- /dev/null +++ b/vendor/github.com/paulmach/orb/equal.go @@ -0,0 +1,51 @@ +package orb + +import ( + "fmt" +) + +// Equal returns if the two geometrires are equal. 
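+//
+// A short sketch (illustrative values):
+//
+//	Equal(Point{1, 2}, Point{1, 2})                               // true
+//	Equal(LineString{{0, 0}, {1, 1}}, LineString{{0, 0}, {1, 2}}) // false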
+func Equal(g1, g2 Geometry) bool { + if g1 == nil || g2 == nil { + return g1 == g2 + } + + if g1.GeoJSONType() != g2.GeoJSONType() { + return false + } + + switch g1 := g1.(type) { + case Point: + return g1.Equal(g2.(Point)) + case MultiPoint: + return g1.Equal(g2.(MultiPoint)) + case LineString: + return g1.Equal(g2.(LineString)) + case MultiLineString: + return g1.Equal(g2.(MultiLineString)) + case Ring: + g2, ok := g2.(Ring) + if !ok { + return false + } + return g1.Equal(g2) + case Polygon: + g2, ok := g2.(Polygon) + if !ok { + return false + } + return g1.Equal(g2) + case MultiPolygon: + return g1.Equal(g2.(MultiPolygon)) + case Collection: + return g1.Equal(g2.(Collection)) + case Bound: + g2, ok := g2.(Bound) + if !ok { + return false + } + return g1.Equal(g2) + } + + panic(fmt.Sprintf("geometry type not supported: %T", g1)) +} diff --git a/vendor/github.com/paulmach/orb/geojson/README.md b/vendor/github.com/paulmach/orb/geojson/README.md new file mode 100644 index 0000000..07ca932 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/README.md @@ -0,0 +1,132 @@ +# orb/geojson [![Godoc Reference](https://pkg.go.dev/badge/github.com/paulmach/orb)](https://pkg.go.dev/github.com/paulmach/orb/geojson) + +This package **encodes and decodes** [GeoJSON](http://geojson.org/) into Go structs +using the geometries in the [orb](https://github.com/paulmach/orb) package. + +Supports both the [json.Marshaler](https://pkg.go.dev/encoding/json#Marshaler) and +[json.Unmarshaler](https://pkg.go.dev/encoding/json#Unmarshaler) interfaces. +The package also provides helper functions such as `UnmarshalFeatureCollection` and `UnmarshalFeature`. + +The types also support BSON via the [bson.Marshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#Marshaler) and +[bson.Unmarshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#Unmarshaler) interfaces. +These types can be used directly when working with MongoDB. + +## Unmarshalling (JSON -> Go) + +```go +rawJSON := []byte(` + { "type": "FeatureCollection", + "features": [ + { "type": "Feature", + "geometry": {"type": "Point", "coordinates": [102.0, 0.5]}, + "properties": {"prop0": "value0"} + } + ] + }`) + +fc, _ := geojson.UnmarshalFeatureCollection(rawJSON) + +// or + +fc := geojson.NewFeatureCollection() +err := json.Unmarshal(rawJSON, &fc) + +// Geometry will be unmarshalled into the correct geo.Geometry type. +point := fc.Features[0].Geometry.(orb.Point) +``` + +## Marshalling (Go -> JSON) + +```go +fc := geojson.NewFeatureCollection() +fc.Append(geojson.NewFeature(orb.Point{1, 2})) + +rawJSON, _ := fc.MarshalJSON() + +// or +blob, _ := json.Marshal(fc) +``` + +## Foreign/extra members in a feature collection + +```go +rawJSON := []byte(` + { "type": "FeatureCollection", + "generator": "myapp", + "timestamp": "2020-06-15T01:02:03Z", + "features": [ + { "type": "Feature", + "geometry": {"type": "Point", "coordinates": [102.0, 0.5]}, + "properties": {"prop0": "value0"} + } + ] + }`) + +fc, _ := geojson.UnmarshalFeatureCollection(rawJSON) + +fc.ExtraMembers["generator"] // == "myApp" +fc.ExtraMembers["timestamp"] // == "2020-06-15T01:02:03Z" + +// Marshalling will include values in `ExtraMembers` in the +// base featureCollection object. 
+``` + +## Performance + +For performance critical applications, consider a +third party replacement of "encoding/json" like [github.com/json-iterator/go](https://github.com/json-iterator/go) + +This can be enabled with something like this: + +```go +import ( + jsoniter "github.com/json-iterator/go" + "github.com/paulmach/orb" +) + +var c = jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: false, + MarshalFloatWith6Digits: true, +}.Froze() + +CustomJSONMarshaler = c +CustomJSONUnmarshaler = c +``` + +The above change can have dramatic performance implications, see the benchmarks below +on a 100k feature collection file: + +``` +benchmark old ns/op new ns/op delta +BenchmarkFeatureMarshalJSON-12 2694543 733480 -72.78% +BenchmarkFeatureUnmarshalJSON-12 5383825 2738183 -49.14% +BenchmarkGeometryMarshalJSON-12 210107 62789 -70.12% +BenchmarkGeometryUnmarshalJSON-12 691472 144689 -79.08% + +benchmark old allocs new allocs delta +BenchmarkFeatureMarshalJSON-12 7818 2316 -70.38% +BenchmarkFeatureUnmarshalJSON-12 23047 31946 +38.61% +BenchmarkGeometryMarshalJSON-12 2 3 +50.00% +BenchmarkGeometryUnmarshalJSON-12 2042 18 -99.12% + +benchmark old bytes new bytes delta +BenchmarkFeatureMarshalJSON-12 794088 490251 -38.26% +BenchmarkFeatureUnmarshalJSON-12 766354 1068497 +39.43% +BenchmarkGeometryMarshalJSON-12 24787 18650 -24.76% +BenchmarkGeometryUnmarshalJSON-12 79784 51374 -35.61% +``` + +## Feature Properties + +GeoJSON features can have properties of any type. This can cause issues in a statically typed +language such as Go. Included is a `Properties` type with some helper methods that will try to +force convert a property. An optional default, will be used if the property is missing or the wrong +type. + +```go +f.Properties.MustBool(key string, def ...bool) bool +f.Properties.MustFloat64(key string, def ...float64) float64 +f.Properties.MustInt(key string, def ...int) int +f.Properties.MustString(key string, def ...string) string +``` diff --git a/vendor/github.com/paulmach/orb/geojson/bbox.go b/vendor/github.com/paulmach/orb/geojson/bbox.go new file mode 100644 index 0000000..cd2c45e --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/bbox.go @@ -0,0 +1,38 @@ +package geojson + +import "github.com/paulmach/orb" + +// BBox is for the geojson bbox attribute which is an array with all axes +// of the most southwesterly point followed by all axes of the more northeasterly point. +type BBox []float64 + +// NewBBox creates a bbox from a a bound. +func NewBBox(b orb.Bound) BBox { + return []float64{ + b.Min[0], b.Min[1], + b.Max[0], b.Max[1], + } +} + +// Valid checks if the bbox is present and has at least 4 elements. +func (bb BBox) Valid() bool { + if bb == nil { + return false + } + + return len(bb) >= 4 && len(bb)%2 == 0 +} + +// Bound returns the orb.Bound for the BBox. 
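+//
+// A short round-trip sketch (illustrative values):
+//
+//	bb := NewBBox(orb.Bound{Min: orb.Point{1, 2}, Max: orb.Point{3, 4}})
+//	b := bb.Bound() // b == orb.Bound{Min: orb.Point{1, 2}, Max: orb.Point{3, 4}}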
+func (bb BBox) Bound() orb.Bound { + if !bb.Valid() { + return orb.Bound{} + } + + mid := len(bb) / 2 + + return orb.Bound{ + Min: orb.Point{bb[0], bb[1]}, + Max: orb.Point{bb[mid], bb[mid+1]}, + } +} diff --git a/vendor/github.com/paulmach/orb/geojson/feature.go b/vendor/github.com/paulmach/orb/geojson/feature.go new file mode 100644 index 0000000..0dd246d --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/feature.go @@ -0,0 +1,138 @@ +package geojson + +import ( + "bytes" + "fmt" + + "github.com/paulmach/orb" + "go.mongodb.org/mongo-driver/bson" +) + +// A Feature corresponds to GeoJSON feature object +type Feature struct { + ID interface{} `json:"id,omitempty"` + Type string `json:"type"` + BBox BBox `json:"bbox,omitempty"` + Geometry orb.Geometry `json:"geometry"` + Properties Properties `json:"properties"` +} + +// NewFeature creates and initializes a GeoJSON feature given the required attributes. +func NewFeature(geometry orb.Geometry) *Feature { + return &Feature{ + Type: "Feature", + Geometry: geometry, + Properties: make(map[string]interface{}), + } +} + +// Point implements the orb.Pointer interface so that Features can be used +// with quadtrees. The point returned is the center of the Bound of the geometry. +// To represent the geometry with another point you must create a wrapper type. +func (f *Feature) Point() orb.Point { + return f.Geometry.Bound().Center() +} + +var _ orb.Pointer = &Feature{} + +// MarshalJSON converts the feature object into the proper JSON. +// It will handle the encoding of all the child geometries. +// Alternately one can call json.Marshal(f) directly for the same result. +func (f Feature) MarshalJSON() ([]byte, error) { + return marshalJSON(newFeatureDoc(&f)) +} + +// MarshalBSON converts the feature object into the proper JSON. +// It will handle the encoding of all the child geometries. +// Alternately one can call json.Marshal(f) directly for the same result. +func (f Feature) MarshalBSON() ([]byte, error) { + return bson.Marshal(newFeatureDoc(&f)) +} + +func newFeatureDoc(f *Feature) *featureDoc { + doc := &featureDoc{ + ID: f.ID, + Type: "Feature", + Properties: f.Properties, + BBox: f.BBox, + Geometry: NewGeometry(f.Geometry), + } + + if len(doc.Properties) == 0 { + doc.Properties = nil + } + + return doc +} + +// UnmarshalFeature decodes the data into a GeoJSON feature. +// Alternately one can call json.Unmarshal(f) directly for the same result. +func UnmarshalFeature(data []byte) (*Feature, error) { + f := &Feature{} + err := f.UnmarshalJSON(data) + if err != nil { + return nil, err + } + + return f, nil +} + +// UnmarshalJSON handles the correct unmarshalling of the data +// into the orb.Geometry types. +func (f *Feature) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte(`null`)) { + *f = Feature{} + return nil + } + + doc := &featureDoc{} + err := unmarshalJSON(data, &doc) + if err != nil { + return err + } + + return featureUnmarshalFinish(doc, f) +} + +// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal. 
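+//
+// A short round-trip sketch using only methods defined in this file
+// (illustrative values):
+//
+//	data, _ := NewFeature(orb.Point{1, 2}).MarshalBSON()
+//	f := &Feature{}
+//	_ = f.UnmarshalBSON(data)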
+func (f *Feature) UnmarshalBSON(data []byte) error { + doc := &featureDoc{} + err := bson.Unmarshal(data, &doc) + if err != nil { + return err + } + + return featureUnmarshalFinish(doc, f) +} + +func featureUnmarshalFinish(doc *featureDoc, f *Feature) error { + if doc.Type != "Feature" { + return fmt.Errorf("geojson: not a feature: type=%s", doc.Type) + } + + var g orb.Geometry + if doc.Geometry != nil { + if doc.Geometry.Coordinates == nil && doc.Geometry.Geometries == nil { + return ErrInvalidGeometry + } + g = doc.Geometry.Geometry() + } + + *f = Feature{ + ID: doc.ID, + Type: doc.Type, + Properties: doc.Properties, + BBox: doc.BBox, + Geometry: g, + } + + return nil +} + +type featureDoc struct { + ID interface{} `json:"id,omitempty" bson:"id"` + Type string `json:"type" bson:"type"` + BBox BBox `json:"bbox,omitempty" bson:"bbox,omitempty"` + Geometry *Geometry `json:"geometry" bson:"geometry"` + Properties Properties `json:"properties" bson:"properties"` +} diff --git a/vendor/github.com/paulmach/orb/geojson/feature_collection.go b/vendor/github.com/paulmach/orb/geojson/feature_collection.go new file mode 100644 index 0000000..0235bc5 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/feature_collection.go @@ -0,0 +1,197 @@ +/* +Package geojson is a library for encoding and decoding GeoJSON into Go structs +using the geometries in the orb package. Supports both the json.Marshaler and +json.Unmarshaler interfaces as well as helper functions such as +`UnmarshalFeatureCollection` and `UnmarshalFeature`. +*/ +package geojson + +import ( + "bytes" + "fmt" + + "go.mongodb.org/mongo-driver/bson" +) + +const featureCollection = "FeatureCollection" + +// A FeatureCollection correlates to a GeoJSON feature collection. +type FeatureCollection struct { + Type string `json:"type"` + BBox BBox `json:"bbox,omitempty"` + Features []*Feature `json:"features"` + + // ExtraMembers can be used to encoded/decode extra key/members in + // the base of the feature collection. Note that keys of "type", "bbox" + // and "features" will not work as those are reserved by the GeoJSON spec. + ExtraMembers Properties `json:"-"` +} + +// NewFeatureCollection creates and initializes a new feature collection. +func NewFeatureCollection() *FeatureCollection { + return &FeatureCollection{ + Type: featureCollection, + Features: []*Feature{}, + } +} + +// Append appends a feature to the collection. +func (fc *FeatureCollection) Append(feature *Feature) *FeatureCollection { + fc.Features = append(fc.Features, feature) + return fc +} + +// MarshalJSON converts the feature collection object into the proper JSON. +// It will handle the encoding of all the child features and geometries. +// Alternately one can call json.Marshal(fc) directly for the same result. +// Items in the ExtraMembers map will be included in the base of the +// feature collection object. +func (fc FeatureCollection) MarshalJSON() ([]byte, error) { + m := newFeatureCollectionDoc(fc) + return marshalJSON(m) +} + +// MarshalBSON converts the feature collection object into a BSON document +// represented by bytes. It will handle the encoding of all the child features +// and geometries. +// Items in the ExtraMembers map will be included in the base of the +// feature collection object. 
+func (fc FeatureCollection) MarshalBSON() ([]byte, error) { + m := newFeatureCollectionDoc(fc) + return bson.Marshal(m) +} + +func newFeatureCollectionDoc(fc FeatureCollection) map[string]interface{} { + var tmp map[string]interface{} + if fc.ExtraMembers != nil { + tmp = fc.ExtraMembers.Clone() + } else { + tmp = make(map[string]interface{}, 3) + } + + tmp["type"] = featureCollection + delete(tmp, "bbox") + if fc.BBox != nil { + tmp["bbox"] = fc.BBox + } + if fc.Features == nil { + tmp["features"] = []*Feature{} + } else { + tmp["features"] = fc.Features + } + + return tmp +} + +// UnmarshalJSON decodes the data into a GeoJSON feature collection. +// Extra/foreign members will be put into the `ExtraMembers` attribute. +func (fc *FeatureCollection) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte(`null`)) { + *fc = FeatureCollection{} + return nil + } + + tmp := make(map[string]nocopyRawMessage, 4) + + err := unmarshalJSON(data, &tmp) + if err != nil { + return err + } + + *fc = FeatureCollection{} + for key, value := range tmp { + switch key { + case "type": + err := unmarshalJSON(value, &fc.Type) + if err != nil { + return err + } + case "bbox": + err := unmarshalJSON(value, &fc.BBox) + if err != nil { + return err + } + case "features": + err := unmarshalJSON(value, &fc.Features) + if err != nil { + return err + } + default: + if fc.ExtraMembers == nil { + fc.ExtraMembers = Properties{} + } + + var val interface{} + err := unmarshalJSON(value, &val) + if err != nil { + return err + } + fc.ExtraMembers[key] = val + } + } + + if fc.Type != featureCollection { + return fmt.Errorf("geojson: not a feature collection: type=%s", fc.Type) + } + + return nil +} + +// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal. +// Extra/foreign members will be put into the `ExtraMembers` attribute. +func (fc *FeatureCollection) UnmarshalBSON(data []byte) error { + tmp := make(map[string]bson.RawValue, 4) + + err := bson.Unmarshal(data, &tmp) + if err != nil { + return err + } + + *fc = FeatureCollection{} + for key, value := range tmp { + switch key { + case "type": + fc.Type, _ = bson.RawValue(value).StringValueOK() + case "bbox": + err := value.Unmarshal(&fc.BBox) + if err != nil { + return err + } + case "features": + err := value.Unmarshal(&fc.Features) + if err != nil { + return err + } + default: + if fc.ExtraMembers == nil { + fc.ExtraMembers = Properties{} + } + + var val interface{} + err := value.Unmarshal(&val) + if err != nil { + return err + } + fc.ExtraMembers[key] = val + } + } + + if fc.Type != featureCollection { + return fmt.Errorf("geojson: not a feature collection: type=%s", fc.Type) + } + + return nil +} + +// UnmarshalFeatureCollection decodes the data into a GeoJSON feature collection. +// Alternately one can call json.Unmarshal(fc) directly for the same result. +func UnmarshalFeatureCollection(data []byte) (*FeatureCollection, error) { + fc := &FeatureCollection{} + + err := fc.UnmarshalJSON(data) + if err != nil { + return nil, err + } + + return fc, nil +} diff --git a/vendor/github.com/paulmach/orb/geojson/geometry.go b/vendor/github.com/paulmach/orb/geojson/geometry.go new file mode 100644 index 0000000..1524521 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/geometry.go @@ -0,0 +1,586 @@ +package geojson + +import ( + "errors" + + "github.com/paulmach/orb" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// ErrInvalidGeometry will be returned if a the json of the geometry is invalid. 
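The UnmarshalJSON/MarshalJSON pair above preserves foreign top-level members via `ExtraMembers`; a small sketch (the `generator` key is made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/paulmach/orb/geojson"
)

func main() {
	raw := []byte(`{"type":"FeatureCollection","features":[],"generator":"example"}`)

	fc, err := geojson.UnmarshalFeatureCollection(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Unknown top-level keys land in ExtraMembers...
	fmt.Println(fc.ExtraMembers["generator"]) // example

	// ...and are written back out alongside "type" and "features".
	out, _ := fc.MarshalJSON()
	fmt.Println(string(out))
}
```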
+var ErrInvalidGeometry = errors.New("geojson: invalid geometry") + +// A Geometry matches the structure of a GeoJSON Geometry. +type Geometry struct { + Type string `json:"type"` + Coordinates orb.Geometry `json:"coordinates,omitempty"` + Geometries []*Geometry `json:"geometries,omitempty"` +} + +// NewGeometry will create a Geometry object but will convert +// the input into a GoeJSON geometry. For example, it will convert +// Rings and Bounds into Polygons. +func NewGeometry(g orb.Geometry) *Geometry { + jg := &Geometry{} + switch g := g.(type) { + case orb.Ring: + jg.Coordinates = orb.Polygon{g} + case orb.Bound: + jg.Coordinates = g.ToPolygon() + case orb.Collection: + for _, c := range g { + jg.Geometries = append(jg.Geometries, NewGeometry(c)) + } + jg.Type = g.GeoJSONType() + default: + jg.Coordinates = g + } + + if jg.Coordinates != nil { + jg.Type = jg.Coordinates.GeoJSONType() + } + return jg +} + +// Geometry returns the orb.Geometry for the geojson Geometry. +// This will convert the "Geometries" into a orb.Collection if applicable. +func (g *Geometry) Geometry() orb.Geometry { + if g.Coordinates != nil { + return g.Coordinates + } + + c := make(orb.Collection, 0, len(g.Geometries)) + for _, geom := range g.Geometries { + c = append(c, geom.Geometry()) + } + return c +} + +// MarshalJSON will marshal the geometry into the correct JSON structure. +func (g *Geometry) MarshalJSON() ([]byte, error) { + if g.Coordinates == nil && len(g.Geometries) == 0 { + return []byte(`null`), nil + } + + ng := newGeometryMarshallDoc(g) + return marshalJSON(ng) +} + +// MarshalBSON will convert the geometry into a BSON document with the structure +// of a GeoJSON Geometry. This function is used when the geometry is the top level +// document to be marshalled. +func (g *Geometry) MarshalBSON() ([]byte, error) { + ng := newGeometryMarshallDoc(g) + return bson.Marshal(ng) +} + +// MarshalBSONValue will marshal the geometry into a BSON value +// with the structure of a GeoJSON Geometry. +func (g *Geometry) MarshalBSONValue() (bsontype.Type, []byte, error) { + // implementing MarshalBSONValue allows us to marshal into a null value + // needed to match behavior with the JSON marshalling. + + if g.Coordinates == nil && len(g.Geometries) == 0 { + return bsontype.Null, nil, nil + } + + ng := newGeometryMarshallDoc(g) + return bson.MarshalValue(ng) +} + +func newGeometryMarshallDoc(g *Geometry) *geometryMarshallDoc { + ng := &geometryMarshallDoc{} + switch g := g.Coordinates.(type) { + case orb.Ring: + ng.Coordinates = orb.Polygon{g} + case orb.Bound: + ng.Coordinates = g.ToPolygon() + case orb.Collection: + ng.Geometries = make([]*Geometry, 0, len(g)) + for _, c := range g { + ng.Geometries = append(ng.Geometries, NewGeometry(c)) + } + ng.Type = g.GeoJSONType() + default: + ng.Coordinates = g + } + + if ng.Coordinates != nil { + ng.Type = ng.Coordinates.GeoJSONType() + } + + if len(g.Geometries) > 0 { + ng.Geometries = g.Geometries + ng.Type = orb.Collection{}.GeoJSONType() + } + + return ng +} + +// UnmarshalGeometry decodes the JSON data into a GeoJSON feature. +// Alternately one can call json.Unmarshal(g) directly for the same result. +func UnmarshalGeometry(data []byte) (*Geometry, error) { + g := &Geometry{} + err := unmarshalJSON(data, g) + if err != nil { + return nil, err + } + + return g, nil +} + +// UnmarshalJSON will unmarshal the correct geometry from the JSON structure. 
+func (g *Geometry) UnmarshalJSON(data []byte) error { + jg := &jsonGeometry{} + err := unmarshalJSON(data, jg) + if err != nil { + return err + } + + switch jg.Type { + case "Point": + p := orb.Point{} + err = unmarshalJSON(jg.Coordinates, &p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPoint": + mp := orb.MultiPoint{} + err = unmarshalJSON(jg.Coordinates, &mp) + if err != nil { + return err + } + g.Coordinates = mp + case "LineString": + ls := orb.LineString{} + err = unmarshalJSON(jg.Coordinates, &ls) + if err != nil { + return err + } + g.Coordinates = ls + case "MultiLineString": + mls := orb.MultiLineString{} + err = unmarshalJSON(jg.Coordinates, &mls) + if err != nil { + return err + } + g.Coordinates = mls + case "Polygon": + p := orb.Polygon{} + err = unmarshalJSON(jg.Coordinates, &p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPolygon": + mp := orb.MultiPolygon{} + err = unmarshalJSON(jg.Coordinates, &mp) + if err != nil { + return err + } + g.Coordinates = mp + case "GeometryCollection": + g.Geometries = jg.Geometries + default: + return ErrInvalidGeometry + } + + g.Type = g.Geometry().GeoJSONType() + + return nil +} + +// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal. +func (g *Geometry) UnmarshalBSON(data []byte) error { + bg := &bsonGeometry{} + err := bson.Unmarshal(data, bg) + if err != nil { + return err + } + + switch bg.Type { + case "Point": + p := orb.Point{} + err = bg.Coordinates.Unmarshal(&p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPoint": + mp := orb.MultiPoint{} + err = bg.Coordinates.Unmarshal(&mp) + if err != nil { + return err + } + g.Coordinates = mp + case "LineString": + ls := orb.LineString{} + + err = bg.Coordinates.Unmarshal(&ls) + if err != nil { + return err + } + g.Coordinates = ls + case "MultiLineString": + mls := orb.MultiLineString{} + err = bg.Coordinates.Unmarshal(&mls) + if err != nil { + return err + } + g.Coordinates = mls + case "Polygon": + p := orb.Polygon{} + err = bg.Coordinates.Unmarshal(&p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPolygon": + mp := orb.MultiPolygon{} + err = bg.Coordinates.Unmarshal(&mp) + if err != nil { + return err + } + g.Coordinates = mp + case "GeometryCollection": + g.Geometries = bg.Geometries + default: + return ErrInvalidGeometry + } + + g.Type = g.Geometry().GeoJSONType() + + return nil +} + +// A Point is a helper type that will marshal to/from a GeoJSON Point geometry. +type Point orb.Point + +// Geometry will return the orb.Geometry version of the data. +func (p Point) Geometry() orb.Geometry { + return orb.Point(p) +} + +// MarshalJSON will convert the Point into a GeoJSON Point geometry. +func (p Point) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.Point(p)}) +} + +// MarshalBSON will convert the Point into a BSON value following the GeoJSON Point structure. +func (p Point) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.Point(p)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON Point geometry. +func (p *Point) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + point, ok := g.Coordinates.(orb.Point) + if !ok { + return errors.New("geojson: not a Point type") + } + + *p = Point(point) + return nil +} + +// UnmarshalBSON will unmarshal GeoJSON Point geometry. 
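A short sketch of the Ring/Bound promotion that NewGeometry performs above, with arbitrary coordinates; both kinds are emitted as GeoJSON Polygons:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
	"github.com/paulmach/orb/geojson"
)

func main() {
	// A closed ring (first point equals the last).
	ring := orb.Ring{{0, 0}, {1, 0}, {1, 1}, {0, 1}, {0, 0}}

	g := geojson.NewGeometry(ring)
	fmt.Println(g.Type) // Polygon

	data, _ := g.MarshalJSON()
	fmt.Println(string(data)) // {"type":"Polygon","coordinates":[[[0,0],[1,0],[1,1],[0,1],[0,0]]]}
}
```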
+func (p *Point) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + point, ok := g.Coordinates.(orb.Point) + if !ok { + return errors.New("geojson: not a Point type") + } + + *p = Point(point) + return nil +} + +// A MultiPoint is a helper type that will marshal to/from a GeoJSON MultiPoint geometry. +type MultiPoint orb.MultiPoint + +// Geometry will return the orb.Geometry version of the data. +func (mp MultiPoint) Geometry() orb.Geometry { + return orb.MultiPoint(mp) +} + +// MarshalJSON will convert the MultiPoint into a GeoJSON MultiPoint geometry. +func (mp MultiPoint) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.MultiPoint(mp)}) +} + +// MarshalBSON will convert the MultiPoint into a GeoJSON MultiPoint geometry BSON. +func (mp MultiPoint) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.MultiPoint(mp)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON MultiPoint geometry. +func (mp *MultiPoint) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + multiPoint, ok := g.Coordinates.(orb.MultiPoint) + if !ok { + return errors.New("geojson: not a MultiPoint type") + } + + *mp = MultiPoint(multiPoint) + return nil +} + +// UnmarshalBSON will unmarshal the GeoJSON MultiPoint geometry. +func (mp *MultiPoint) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + multiPoint, ok := g.Coordinates.(orb.MultiPoint) + if !ok { + return errors.New("geojson: not a MultiPoint type") + } + + *mp = MultiPoint(multiPoint) + return nil +} + +// A LineString is a helper type that will marshal to/from a GeoJSON LineString geometry. +type LineString orb.LineString + +// Geometry will return the orb.Geometry version of the data. +func (ls LineString) Geometry() orb.Geometry { + return orb.LineString(ls) +} + +// MarshalJSON will convert the LineString into a GeoJSON LineString geometry. +func (ls LineString) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.LineString(ls)}) +} + +// MarshalBSON will convert the LineString into a GeoJSON LineString geometry. +func (ls LineString) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.LineString(ls)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON MultiPoint geometry. +func (ls *LineString) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + lineString, ok := g.Coordinates.(orb.LineString) + if !ok { + return errors.New("geojson: not a LineString type") + } + + *ls = LineString(lineString) + return nil +} + +// UnmarshalBSON will unmarshal the GeoJSON MultiPoint geometry. +func (ls *LineString) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + lineString, ok := g.Coordinates.(orb.LineString) + if !ok { + return errors.New("geojson: not a LineString type") + } + + *ls = LineString(lineString) + return nil +} + +// A MultiLineString is a helper type that will marshal to/from a GeoJSON MultiLineString geometry. +type MultiLineString orb.MultiLineString + +// Geometry will return the orb.Geometry version of the data. 
+func (mls MultiLineString) Geometry() orb.Geometry { + return orb.MultiLineString(mls) +} + +// MarshalJSON will convert the MultiLineString into a GeoJSON MultiLineString geometry. +func (mls MultiLineString) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.MultiLineString(mls)}) +} + +// MarshalBSON will convert the MultiLineString into a GeoJSON MultiLineString geometry. +func (mls MultiLineString) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.MultiLineString(mls)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON MultiPoint geometry. +func (mls *MultiLineString) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + multilineString, ok := g.Coordinates.(orb.MultiLineString) + if !ok { + return errors.New("geojson: not a MultiLineString type") + } + + *mls = MultiLineString(multilineString) + return nil +} + +// UnmarshalBSON will unmarshal the GeoJSON MultiPoint geometry. +func (mls *MultiLineString) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + multilineString, ok := g.Coordinates.(orb.MultiLineString) + if !ok { + return errors.New("geojson: not a MultiLineString type") + } + + *mls = MultiLineString(multilineString) + return nil +} + +// A Polygon is a helper type that will marshal to/from a GeoJSON Polygon geometry. +type Polygon orb.Polygon + +// Geometry will return the orb.Geometry version of the data. +func (p Polygon) Geometry() orb.Geometry { + return orb.Polygon(p) +} + +// MarshalJSON will convert the Polygon into a GeoJSON Polygon geometry. +func (p Polygon) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.Polygon(p)}) +} + +// MarshalBSON will convert the Polygon into a GeoJSON Polygon geometry. +func (p Polygon) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.Polygon(p)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON Polygon geometry. +func (p *Polygon) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + polygon, ok := g.Coordinates.(orb.Polygon) + if !ok { + return errors.New("geojson: not a Polygon type") + } + + *p = Polygon(polygon) + return nil +} + +// UnmarshalBSON will unmarshal the GeoJSON Polygon geometry. +func (p *Polygon) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + polygon, ok := g.Coordinates.(orb.Polygon) + if !ok { + return errors.New("geojson: not a Polygon type") + } + + *p = Polygon(polygon) + return nil +} + +// A MultiPolygon is a helper type that will marshal to/from a GeoJSON MultiPolygon geometry. +type MultiPolygon orb.MultiPolygon + +// Geometry will return the orb.Geometry version of the data. +func (mp MultiPolygon) Geometry() orb.Geometry { + return orb.MultiPolygon(mp) +} + +// MarshalJSON will convert the MultiPolygon into a GeoJSON MultiPolygon geometry. +func (mp MultiPolygon) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.MultiPolygon(mp)}) +} + +// MarshalBSON will convert the MultiPolygon into a GeoJSON MultiPolygon geometry. +func (mp MultiPolygon) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.MultiPolygon(mp)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON MultiPolygon geometry. 
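The helper types above (geojson.Point, MultiPoint, LineString, Polygon, and so on) let a bare geometry document be decoded without a Feature wrapper; a minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/paulmach/orb/geojson"
)

func main() {
	var ls geojson.LineString
	if err := json.Unmarshal([]byte(`{"type":"LineString","coordinates":[[0,0],[1,1]]}`), &ls); err != nil {
		log.Fatal(err)
	}

	// Geometry() hands back the underlying orb.LineString.
	fmt.Println(ls.Geometry())
}
```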
+func (mp *MultiPolygon) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + multiPolygon, ok := g.Coordinates.(orb.MultiPolygon) + if !ok { + return errors.New("geojson: not a MultiPolygon type") + } + + *mp = MultiPolygon(multiPolygon) + return nil +} + +// UnmarshalBSON will unmarshal the GeoJSON MultiPolygon geometry. +func (mp *MultiPolygon) UnmarshalBSON(data []byte) error { + g := &Geometry{} + err := bson.Unmarshal(data, &g) + if err != nil { + return err + } + + multiPolygon, ok := g.Coordinates.(orb.MultiPolygon) + if !ok { + return errors.New("geojson: not a MultiPolygon type") + } + + *mp = MultiPolygon(multiPolygon) + return nil +} + +type bsonGeometry struct { + Type string `json:"type" bson:"type"` + Coordinates bson.RawValue `json:"coordinates" bson:"coordinates"` + Geometries []*Geometry `json:"geometries,omitempty" bson:"geometries"` +} + +type jsonGeometry struct { + Type string `json:"type"` + Coordinates nocopyRawMessage `json:"coordinates"` + Geometries []*Geometry `json:"geometries,omitempty"` +} + +type geometryMarshallDoc struct { + Type string `json:"type" bson:"type"` + Coordinates orb.Geometry `json:"coordinates,omitempty" bson:"coordinates,omitempty"` + Geometries []*Geometry `json:"geometries,omitempty" bson:"geometries,omitempty"` +} diff --git a/vendor/github.com/paulmach/orb/geojson/json.go b/vendor/github.com/paulmach/orb/geojson/json.go new file mode 100644 index 0000000..7b8d654 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/json.go @@ -0,0 +1,74 @@ +package geojson + +import "encoding/json" + +// CustomJSONMarshaler can be set to have the code use a different +// json marshaler than the default in the standard library. +// One use case in enabling `github.com/json-iterator/go` +// with something like this: +// +// import ( +// jsoniter "github.com/json-iterator/go" +// "github.com/paulmach/orb" +// ) +// +// var c = jsoniter.Config{ +// EscapeHTML: true, +// SortMapKeys: false, +// MarshalFloatWith6Digits: true, +// }.Froze() +// +// orb.CustomJSONMarshaler = c +// orb.CustomJSONUnmarshaler = c +// +// Note that any errors encountered during marshaling will be different. +var CustomJSONMarshaler interface { + Marshal(v interface{}) ([]byte, error) +} = nil + +// CustomJSONUnmarshaler can be set to have the code use a different +// json unmarshaler than the default in the standard library. +// One use case in enabling `github.com/json-iterator/go` +// with something like this: +// +// import ( +// jsoniter "github.com/json-iterator/go" +// "github.com/paulmach/orb" +// ) +// +// var c = jsoniter.Config{ +// EscapeHTML: true, +// SortMapKeys: false, +// MarshalFloatWith6Digits: true, +// }.Froze() +// +// orb.CustomJSONMarshaler = c +// orb.CustomJSONUnmarshaler = c +// +// Note that any errors encountered during unmarshaling will be different. 
+var CustomJSONUnmarshaler interface { + Unmarshal(data []byte, v interface{}) error +} = nil + +func marshalJSON(v interface{}) ([]byte, error) { + if CustomJSONMarshaler == nil { + return json.Marshal(v) + } + + return CustomJSONMarshaler.Marshal(v) +} + +func unmarshalJSON(data []byte, v interface{}) error { + if CustomJSONUnmarshaler == nil { + return json.Unmarshal(data, v) + } + + return CustomJSONUnmarshaler.Unmarshal(data, v) +} + +type nocopyRawMessage []byte + +func (m *nocopyRawMessage) UnmarshalJSON(data []byte) error { + *m = data + return nil +} diff --git a/vendor/github.com/paulmach/orb/geojson/properties.go b/vendor/github.com/paulmach/orb/geojson/properties.go new file mode 100644 index 0000000..0e0eca9 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/properties.go @@ -0,0 +1,112 @@ +package geojson + +import "fmt" + +// Properties defines the feature properties with some helper methods. +type Properties map[string]interface{} + +// MustBool guarantees the return of a `bool` (with optional default). +// This function useful when you explicitly want a `bool` in a single +// value return context, for example: +// myFunc(f.Properties.MustBool("param1"), f.Properties.MustBool("optional_param", true)) +// This function will panic if the value is present but not a bool. +func (p Properties) MustBool(key string, def ...bool) bool { + v := p[key] + if b, ok := v.(bool); ok { + return b + } + + if v != nil { + panic(fmt.Sprintf("not a bool, but a %T: %v", v, v)) + } + + if len(def) > 0 { + return def[0] + } + + panic("property not found") +} + +// MustInt guarantees the return of an `int` (with optional default). +// This function useful when you explicitly want a `int` in a single +// value return context, for example: +// myFunc(f.Properties.MustInt("param1"), f.Properties.MustInt("optional_param", 123)) +// This function will panic if the value is present but not a number. +func (p Properties) MustInt(key string, def ...int) int { + v := p[key] + if i, ok := v.(int); ok { + return i + } + + if f, ok := v.(float64); ok { + return int(f) + } + + if v != nil { + panic(fmt.Sprintf("not a number, but a %T: %v", v, v)) + } + + if len(def) > 0 { + return def[0] + } + + panic("property not found") +} + +// MustFloat64 guarantees the return of a `float64` (with optional default) +// This function useful when you explicitly want a `float64` in a single +// value return context, for example: +// myFunc(f.Properties.MustFloat64("param1"), f.Properties.MustFloat64("optional_param", 10.1)) +// This function will panic if the value is present but not a number. +func (p Properties) MustFloat64(key string, def ...float64) float64 { + v := p[key] + if f, ok := v.(float64); ok { + return f + } + + if i, ok := v.(int); ok { + return float64(i) + } + + if v != nil { + panic(fmt.Sprintf("not a number, but a %T: %v", v, v)) + } + + if len(def) > 0 { + return def[0] + } + + panic("property not found") +} + +// MustString guarantees the return of a `string` (with optional default) +// This function useful when you explicitly want a `string` in a single +// value return context, for example: +// myFunc(f.Properties.MustString("param1"), f.Properties.MustString("optional_param", "default")) +// This function will panic if the value is present but not a string. 
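Illustrative use of the Properties helpers defined in properties.go above; the keys and values are invented, and note that numbers decoded from JSON arrive as float64, which MustInt handles:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
	"github.com/paulmach/orb/geojson"
)

func main() {
	f := geojson.NewFeature(orb.Point{0, 0})
	f.Properties["name"] = "runway"
	f.Properties["lanes"] = 2.0 // what a JSON number looks like after decoding

	fmt.Println(f.Properties.MustString("name"))        // runway
	fmt.Println(f.Properties.MustInt("lanes"))          // 2
	fmt.Println(f.Properties.MustBool("lighted", true)) // true (key missing, default used)
}
```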
+func (p Properties) MustString(key string, def ...string) string { + v := p[key] + if s, ok := v.(string); ok { + return s + } + + if v != nil { + panic(fmt.Sprintf("not a string, but a %T: %v", v, v)) + } + + if len(def) > 0 { + return def[0] + } + + panic("property not found") +} + +// Clone returns a shallow copy of the properties. +func (p Properties) Clone() Properties { + n := make(Properties, len(p)+3) + for k, v := range p { + n[k] = v + } + + return n +} diff --git a/vendor/github.com/paulmach/orb/geojson/types.go b/vendor/github.com/paulmach/orb/geojson/types.go new file mode 100644 index 0000000..7a6a7e7 --- /dev/null +++ b/vendor/github.com/paulmach/orb/geojson/types.go @@ -0,0 +1,11 @@ +package geojson + +// A list of the geojson types that are currently supported. +const ( + TypePoint = "Point" + TypeMultiPoint = "MultiPoint" + TypeLineString = "LineString" + TypeMultiLineString = "MultiLineString" + TypePolygon = "Polygon" + TypeMultiPolygon = "MultiPolygon" +) diff --git a/vendor/github.com/paulmach/orb/geometry.go b/vendor/github.com/paulmach/orb/geometry.go new file mode 100644 index 0000000..a15e36b --- /dev/null +++ b/vendor/github.com/paulmach/orb/geometry.go @@ -0,0 +1,146 @@ +package orb + +// Geometry is an interface that represents the shared attributes +// of a geometry. +type Geometry interface { + GeoJSONType() string + Dimensions() int // e.g. 0d, 1d, 2d + Bound() Bound + + // requiring because sub package type switch over all possible types. + private() +} + +// compile time checks +var ( + _ Geometry = Point{} + _ Geometry = MultiPoint{} + _ Geometry = LineString{} + _ Geometry = MultiLineString{} + _ Geometry = Ring{} + _ Geometry = Polygon{} + _ Geometry = MultiPolygon{} + _ Geometry = Bound{} + + _ Geometry = Collection{} +) + +func (p Point) private() {} +func (mp MultiPoint) private() {} +func (ls LineString) private() {} +func (mls MultiLineString) private() {} +func (r Ring) private() {} +func (p Polygon) private() {} +func (mp MultiPolygon) private() {} +func (b Bound) private() {} +func (c Collection) private() {} + +// AllGeometries lists all possible types and values that a geometry +// interface can be. It should be used only for testing to verify +// functions that accept a Geometry will work in all cases. +var AllGeometries = []Geometry{ + nil, + Point{}, + MultiPoint{}, + LineString{}, + MultiLineString{}, + Ring{}, + Polygon{}, + MultiPolygon{}, + Bound{}, + Collection{}, + + // nil values + MultiPoint(nil), + LineString(nil), + MultiLineString(nil), + Ring(nil), + Polygon(nil), + MultiPolygon(nil), + Collection(nil), + + // Collection of Collection + Collection{Collection{Point{}}}, +} + +// A Collection is a collection of geometries that is also a Geometry. +type Collection []Geometry + +// GeoJSONType returns the geometry collection type. +func (c Collection) GeoJSONType() string { + return "GeometryCollection" +} + +// Dimensions returns the max of the dimensions of the collection. +func (c Collection) Dimensions() int { + max := -1 + for _, g := range c { + if d := g.Dimensions(); d > max { + max = d + } + } + + return max +} + +// Bound returns the bounding box of all the Geometries combined. 
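The orb.Geometry interface above is meant to be consumed with a type switch over the concrete types listed in the compile-time checks; a sketch with arbitrary geometries (the describe helper is purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
)

// describe is an illustrative helper, not part of the orb package.
func describe(g orb.Geometry) string {
	switch g := g.(type) {
	case orb.Point:
		return fmt.Sprintf("point at %v", g)
	case orb.Collection:
		return fmt.Sprintf("collection of %d geometries", len(g))
	default:
		return fmt.Sprintf("%s with %dd extent", g.GeoJSONType(), g.Dimensions())
	}
}

func main() {
	c := orb.Collection{orb.Point{1, 1}, orb.LineString{{0, 0}, {2, 2}}}

	fmt.Println(describe(c))    // collection of 2 geometries
	fmt.Println(describe(c[1])) // LineString with 1d extent
	fmt.Println(c.Bound())      // union of the member bounds
}
```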
+func (c Collection) Bound() Bound { + if len(c) == 0 { + return emptyBound + } + + var b Bound + start := -1 + + for i, g := range c { + if g != nil { + start = i + b = g.Bound() + break + } + } + + if start == -1 { + return emptyBound + } + + for i := start + 1; i < len(c); i++ { + if c[i] == nil { + continue + } + + b = b.Union(c[i].Bound()) + } + + return b +} + +// Equal compares two collections. Returns true if lengths are the same +// and all the sub geometries are the same and in the same order. +func (c Collection) Equal(collection Collection) bool { + if len(c) != len(collection) { + return false + } + + for i, g := range c { + if !Equal(g, collection[i]) { + return false + } + } + + return true +} + +// Clone returns a deep copy of the collection. +func (c Collection) Clone() Collection { + if c == nil { + return nil + } + + nc := make(Collection, len(c)) + for i, g := range c { + nc[i] = Clone(g) + } + + return nc +} diff --git a/vendor/github.com/paulmach/orb/internal/mercator/mercator.go b/vendor/github.com/paulmach/orb/internal/mercator/mercator.go new file mode 100644 index 0000000..4008068 --- /dev/null +++ b/vendor/github.com/paulmach/orb/internal/mercator/mercator.go @@ -0,0 +1,57 @@ +package mercator + +import "math" + +// for testing +var ( + Epsilon = 1e-6 + + Cities = [][2]float64{ + {57.09700, 9.85000}, {49.03000, -122.32000}, {39.23500, -76.17490}, + {57.20000, -2.20000}, {16.75000, -99.76700}, {5.60000, -0.16700}, + {51.66700, -176.46700}, {9.00000, 38.73330}, {-34.7666, 138.53670}, + {12.80000, 45.00000}, {42.70000, -110.86700}, {13.48167, 144.79330}, + {33.53300, -81.71700}, {42.53300, -99.85000}, {26.01670, 50.55000}, + {35.75000, -84.00000}, {51.11933, -1.15543}, {82.52000, -62.28000}, + {32.91700, -85.91700}, {31.19000, 29.95000}, {36.70000, 3.21700}, + {34.14000, -118.10700}, {32.50370, -116.45100}, {47.83400, 10.86800}, + {28.25000, 129.70000}, {16.75000, -22.95000}, {31.95000, 35.95000}, + {52.35000, 4.86660}, {13.58670, 144.93670}, {6.90000, 134.15000}, + {40.03000, 32.90000}, {33.65000, -85.78300}, {49.33000, 10.59700}, + {17.13330, -61.78330}, {-23.4333, -70.60000}, {51.21670, 4.40000}, + {29.60000, 35.01000}, {38.58330, -121.48300}, {34.16700, -97.13300}, + {45.60000, 9.15000}, {-18.3500, -70.33330}, {-7.88000, -14.42000}, + {15.28330, 38.90000}, {-25.2333, -57.51670}, {23.96500, 32.82000}, + {-36.8832, 174.75000}, {-38.0333, 144.46670}, {46.03300, 12.60000}, + {41.66700, -72.83300}, {35.45000, 139.45000}} +) + +// ToPlanar converts the point to geo world coordinates at the given live. +func ToPlanar(lng, lat float64, level uint32) (x, y float64) { + maxtiles := float64(uint64(1 << level)) + x = (lng/360.0 + 0.5) * maxtiles + + // bound it because we have a top of the world problem + siny := math.Sin(lat * math.Pi / 180.0) + + if siny < -0.9999 { + y = 0 + } else if siny > 0.9999 { + y = maxtiles - 1 + } else { + lat = 0.5 + 0.5*math.Log((1.0+siny)/(1.0-siny))/(-2*math.Pi) + y = lat * maxtiles + } + + return +} + +// ToGeo projects world coordinates back to geo coordinates. 
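The internal/mercator package cannot be imported from outside orb, so as a worked check of the ToPlanar formula above (the arithmetic is repeated here for illustration): at level 1, lon=0, lat=0 should land at planar (1, 1), the corner shared by all four zoom-1 tiles.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	lng, lat, level := 0.0, 0.0, uint32(1)
	maxtiles := float64(uint64(1 << level))

	// Same arithmetic as mercator.ToPlanar.
	x := (lng/360.0 + 0.5) * maxtiles

	siny := math.Sin(lat * math.Pi / 180.0)
	y := (0.5 + 0.5*math.Log((1.0+siny)/(1.0-siny))/(-2*math.Pi)) * maxtiles

	fmt.Println(x, y) // 1 1
}
```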
+func ToGeo(x, y float64, level uint32) (lng, lat float64) { + maxtiles := float64(uint64(1 << level)) + + lng = 360.0 * (x/maxtiles - 0.5) + lat = 2.0*math.Atan(math.Exp(math.Pi-(2*math.Pi)*(y/maxtiles)))*(180.0/math.Pi) - 90.0 + + return lng, lat +} diff --git a/vendor/github.com/paulmach/orb/line_string.go b/vendor/github.com/paulmach/orb/line_string.go new file mode 100644 index 0000000..6f48939 --- /dev/null +++ b/vendor/github.com/paulmach/orb/line_string.go @@ -0,0 +1,40 @@ +package orb + +// LineString represents a set of points to be thought of as a polyline. +type LineString []Point + +// GeoJSONType returns the GeoJSON type for the object. +func (ls LineString) GeoJSONType() string { + return "LineString" +} + +// Dimensions returns 1 because a LineString is a 1d object. +func (ls LineString) Dimensions() int { + return 1 +} + +// Reverse will reverse the line string. +// This is done inplace, ie. it modifies the original data. +func (ls LineString) Reverse() { + l := len(ls) - 1 + for i := 0; i <= l/2; i++ { + ls[i], ls[l-i] = ls[l-i], ls[i] + } +} + +// Bound returns a rect around the line string. Uses rectangular coordinates. +func (ls LineString) Bound() Bound { + return MultiPoint(ls).Bound() +} + +// Equal compares two line strings. Returns true if lengths are the same +// and all points are Equal. +func (ls LineString) Equal(lineString LineString) bool { + return MultiPoint(ls).Equal(MultiPoint(lineString)) +} + +// Clone returns a new copy of the line string. +func (ls LineString) Clone() LineString { + ps := MultiPoint(ls) + return LineString(ps.Clone()) +} diff --git a/vendor/github.com/paulmach/orb/maptile/README.md b/vendor/github.com/paulmach/orb/maptile/README.md new file mode 100644 index 0000000..3dea22e --- /dev/null +++ b/vendor/github.com/paulmach/orb/maptile/README.md @@ -0,0 +1,28 @@ +# orb/maptile [![Godoc Reference](https://pkg.go.dev/badge/github.com/paulmach/orb)](https://pkg.go.dev/github.com/paulmach/orb/maptile) + +Package `maptile` provides types and methods for working with +[web mercator map tiles](https://www.google.com/search?q=web+mercator+map+tiles). +It defines a tile as: + +```go +type Tile struct { + X, Y uint32 + Z Zoom +} + +type Zoom uint32 +``` + +Functions are provided to create tiles from lon/lat points as well as +[quadkeys](https://msdn.microsoft.com/en-us/library/bb259689.aspx). +The tile defines helper methods such as `Parent()`, `Children()`, `Siblings()`, etc. + +## List of sub-package utilities + +- [`tilecover`](tilecover) - computes the covering set of tiles for an `orb.Geometry`. + +## Similar libraries in other languages: + +- [mercantile](https://github.com/mapbox/mercantile) - Python +- [sphericalmercator](https://github.com/mapbox/sphericalmercator) - Node +- [tilebelt](https://github.com/mapbox/tilebelt) - Node diff --git a/vendor/github.com/paulmach/orb/maptile/set.go b/vendor/github.com/paulmach/orb/maptile/set.go new file mode 100644 index 0000000..b56428b --- /dev/null +++ b/vendor/github.com/paulmach/orb/maptile/set.go @@ -0,0 +1,29 @@ +package maptile + +import ( + "github.com/paulmach/orb/geojson" +) + +// Set is a map/hash of tiles. +type Set map[Tile]bool + +// ToFeatureCollection converts a set of tiles into a feature collection. +// This method is mostly useful for debugging output. 
+func (s Set) ToFeatureCollection() *geojson.FeatureCollection { + fc := geojson.NewFeatureCollection() + fc.Features = make([]*geojson.Feature, 0, len(s)) + for t := range s { + fc.Append(geojson.NewFeature(t.Bound().ToPolygon())) + } + + return fc +} + +// Merge will merge the given set into the existing set. +func (s Set) Merge(set Set) { + for t, v := range set { + if v { + s[t] = true + } + } +} diff --git a/vendor/github.com/paulmach/orb/maptile/tile.go b/vendor/github.com/paulmach/orb/maptile/tile.go new file mode 100644 index 0000000..1e61bd3 --- /dev/null +++ b/vendor/github.com/paulmach/orb/maptile/tile.go @@ -0,0 +1,307 @@ +// Package maptile defines a Tile type and methods to work with +// web map projected tile data. +package maptile + +import ( + "math" + "math/bits" + + "github.com/paulmach/orb" + "github.com/paulmach/orb/geojson" + "github.com/paulmach/orb/internal/mercator" +) + +// Tiles is a set of tiles, later we can add methods to this. +type Tiles []Tile + +// ToFeatureCollection converts the tiles into a feature collection. +// This method is mostly useful for debugging output. +func (ts Tiles) ToFeatureCollection() *geojson.FeatureCollection { + fc := geojson.NewFeatureCollection() + fc.Features = make([]*geojson.Feature, 0, len(ts)) + for _, t := range ts { + fc.Append(geojson.NewFeature(t.Bound().ToPolygon())) + } + + return fc +} + +// Tile is an x, y, z web mercator tile. +type Tile struct { + X, Y uint32 + Z Zoom +} + +// A Zoom is a strict type for a tile zoom level. +type Zoom uint32 + +// New creates a new tile with the given coordinates. +func New(x, y uint32, z Zoom) Tile { + return Tile{x, y, z} +} + +// At creates a tile for the point at the given zoom. +// Will create a valid tile for the zoom. Points outside +// the range lat [-85.0511, 85.0511] will be snapped to the +// max or min tile as appropriate. +func At(ll orb.Point, z Zoom) Tile { + f := Fraction(ll, z) + t := Tile{ + X: uint32(f[0]), + Y: uint32(f[1]), + Z: z, + } + + return t +} + +// FromQuadkey creates the tile from the quadkey. +func FromQuadkey(k uint64, z Zoom) Tile { + t := Tile{Z: z} + + for i := Zoom(0); i < z; i++ { + t.X |= uint32((k & (1 << (2 * i))) >> i) + t.Y |= uint32((k & (1 << (2*i + 1))) >> (i + 1)) + } + + return t +} + +// Valid returns if the tile's x/y are within the range for the tile's zoom. +func (t Tile) Valid() bool { + maxIndex := uint32(1) << uint32(t.Z) + return t.X < maxIndex && t.Y < maxIndex +} + +// Bound returns the geo bound for the tile. +// An optional tileBuffer parameter can be passes to create a buffer +// around the bound in tile dimension. e.g. a tileBuffer of 1 would create +// a bound 9x the size of the tile, centered around the provided tile. +func (t Tile) Bound(tileBuffer ...float64) orb.Bound { + buffer := 0.0 + if len(tileBuffer) > 0 { + buffer = tileBuffer[0] + } + + x := float64(t.X) + y := float64(t.Y) + + minx := x - buffer + + miny := y - buffer + if miny < 0 { + miny = 0 + } + + lon1, lat1 := mercator.ToGeo(minx, miny, uint32(t.Z)) + + maxx := x + 1 + buffer + + maxtiles := float64(uint32(1 << t.Z)) + maxy := y + 1 + buffer + if maxy > maxtiles { + maxy = maxtiles + } + + lon2, lat2 := mercator.ToGeo(maxx, maxy, uint32(t.Z)) + + return orb.Bound{ + Min: orb.Point{lon1, lat2}, + Max: orb.Point{lon2, lat1}, + } +} + +// Center returns the center of the tile. +func (t Tile) Center() orb.Point { + return t.Bound(0).Center() +} + +// Contains returns if the given tile is fully contained (or equal to) the give tile. 
+func (t Tile) Contains(tile Tile) bool { + if tile.Z < t.Z { + return false + } + + return t == tile.toZoom(t.Z) +} + +// Parent returns the parent of the tile. +func (t Tile) Parent() Tile { + if t.Z == 0 { + return t + } + + return Tile{ + X: t.X >> 1, + Y: t.Y >> 1, + Z: t.Z - 1, + } +} + +// Fraction returns the precise tile fraction at the given zoom. +// Will return 2^zoom-1 if the point is below 85.0511 S. +func Fraction(ll orb.Point, z Zoom) orb.Point { + var p orb.Point + + factor := uint32(1 << z) + maxtiles := float64(factor) + + lng := ll[0]/360.0 + 0.5 + p[0] = lng * maxtiles + + // bound it because we have a top of the world problem + if ll[1] < -85.0511 { + p[1] = maxtiles - 1 + } else if ll[1] > 85.0511 { + p[1] = 0 + } else { + siny := math.Sin(ll[1] * math.Pi / 180.0) + lat := 0.5 + 0.5*math.Log((1.0+siny)/(1.0-siny))/(-2*math.Pi) + p[1] = lat * maxtiles + } + + return p +} + +// SharedParent returns the tile that contains both the tiles. +func (t Tile) SharedParent(tile Tile) Tile { + // bring both tiles to the lowest zoom. + if t.Z != tile.Z { + if t.Z < tile.Z { + tile = tile.toZoom(t.Z) + } else { + t = t.toZoom(tile.Z) + } + } + + if t == tile { + return t + } + + // go version < 1.9 + // bit package usage was about 10% faster + // + // TODO: use build flags to support older versions of go. + // + // move from most significant to least until there isn't a match. + // for i := t.Z - 1; i >= 0; i-- { + // if t.X&(1<> (i + 1), + // t.Y >> (i + 1), + // t.Z - (i + 1), + // } + // } + // } + // + // if we reach here the tiles are the same, which was checked above. + // panic("unreachable") + + // bits different for x and y + xc := uint32(32 - bits.LeadingZeros32(t.X^tile.X)) + yc := uint32(32 - bits.LeadingZeros32(t.Y^tile.Y)) + + // max of xc, yc + maxc := xc + if yc > maxc { + maxc = yc + + } + + return Tile{ + X: t.X >> maxc, + Y: t.Y >> maxc, + Z: t.Z - Zoom(maxc), + } +} + +// Children returns the 4 children of the tile. +func (t Tile) Children() Tiles { + return Tiles{ + Tile{t.X << 1, t.Y << 1, t.Z + 1}, + Tile{(t.X << 1) + 1, t.Y << 1, t.Z + 1}, + Tile{(t.X << 1) + 1, (t.Y << 1) + 1, t.Z + 1}, + Tile{t.X << 1, (t.Y << 1) + 1, t.Z + 1}, + } +} + +// ChildrenInZoomRange returns all the children tiles of tile from ranges [zoomStart, zoomEnd], both ends inclusive. +func ChildrenInZoomRange(tile Tile, zoomStart, zoomEnd Zoom) Tiles { + if !(zoomStart <= zoomEnd) { + panic("zoomStart must be <= zoomEnd") + } + if !(tile.Z <= zoomStart) { + panic("tile.Z is must be <= zoomStart") + } + + zDeltaStart := zoomStart - tile.Z + zDeltaEnd := zoomEnd - tile.Z + + res := make([]Tile, 0) + + for d := zDeltaStart; d <= zDeltaEnd; d++ { + xStart := tile.X << d + yStart := tile.Y << d + dim := uint32(1 << d) + for x := xStart; x < xStart+dim; x++ { + for y := yStart; y < yStart+dim; y++ { + res = append(res, New(x, y, tile.Z+d)) + } + } + } + + return res +} + +// Siblings returns the 4 tiles that share this tile's parent. +func (t Tile) Siblings() Tiles { + return t.Parent().Children() +} + +// Quadkey returns the quad key for the tile. +func (t Tile) Quadkey() uint64 { + var i, result uint64 + for i = 0; i < uint64(t.Z); i++ { + result |= (uint64(t.X) & (1 << i)) << i + result |= (uint64(t.Y) & (1 << i)) << (i + 1) + } + + return result +} + +// Range returns the min and max tile "range" to cover the tile +// at the given zoom. 
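A small usage sketch against the maptile API vendored above; the point is roughly San Francisco and chosen only to exercise At, Bound, Parent/Contains and the quadkey round trip:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
	"github.com/paulmach/orb/maptile"
)

func main() {
	// The zoom-14 tile containing the point.
	t := maptile.At(orb.Point{-122.4194, 37.7749}, 14)
	fmt.Println(t.X, t.Y, t.Z)

	// Geographic extent of the tile (Min/Max are lon-lat corners).
	b := t.Bound()
	fmt.Println(b.Min, b.Max)

	// A parent always contains its children.
	fmt.Println(t.Parent().Contains(t)) // true

	// Quadkeys round-trip as long as the zoom is carried separately.
	k := t.Quadkey()
	fmt.Println(maptile.FromQuadkey(k, t.Z) == t) // true
}
```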
+func (t Tile) Range(z Zoom) (min, max Tile) { + if z < t.Z { + t = t.toZoom(z) + return t, t + } + + offset := z - t.Z + return Tile{ + X: t.X << offset, + Y: t.Y << offset, + Z: z, + }, Tile{ + X: ((t.X + 1) << offset) - 1, + Y: ((t.Y + 1) << offset) - 1, + Z: z, + } +} + +func (t Tile) toZoom(z Zoom) Tile { + if z > t.Z { + return Tile{ + X: t.X << (z - t.Z), + Y: t.Y << (z - t.Z), + Z: z, + } + } + + return Tile{ + X: t.X >> (t.Z - z), + Y: t.Y >> (t.Z - z), + Z: z, + } +} diff --git a/vendor/github.com/paulmach/orb/multi_line_string.go b/vendor/github.com/paulmach/orb/multi_line_string.go new file mode 100644 index 0000000..2b67f20 --- /dev/null +++ b/vendor/github.com/paulmach/orb/multi_line_string.go @@ -0,0 +1,58 @@ +package orb + +// MultiLineString is a set of polylines. +type MultiLineString []LineString + +// GeoJSONType returns the GeoJSON type for the object. +func (mls MultiLineString) GeoJSONType() string { + return "MultiLineString" +} + +// Dimensions returns 1 because a MultiLineString is a 2d object. +func (mls MultiLineString) Dimensions() int { + return 1 +} + +// Bound returns a bound around all the line strings. +func (mls MultiLineString) Bound() Bound { + if len(mls) == 0 { + return emptyBound + } + + bound := mls[0].Bound() + for i := 1; i < len(mls); i++ { + bound = bound.Union(mls[i].Bound()) + } + + return bound +} + +// Equal compares two multi line strings. Returns true if lengths are the same +// and all points are Equal. +func (mls MultiLineString) Equal(multiLineString MultiLineString) bool { + if len(mls) != len(multiLineString) { + return false + } + + for i, ls := range mls { + if !ls.Equal(multiLineString[i]) { + return false + } + } + + return true +} + +// Clone returns a new deep copy of the multi line string. +func (mls MultiLineString) Clone() MultiLineString { + if mls == nil { + return nil + } + + nmls := make(MultiLineString, 0, len(mls)) + for _, ls := range mls { + nmls = append(nmls, ls.Clone()) + } + + return nmls +} diff --git a/vendor/github.com/paulmach/orb/multi_point.go b/vendor/github.com/paulmach/orb/multi_point.go new file mode 100644 index 0000000..cee955c --- /dev/null +++ b/vendor/github.com/paulmach/orb/multi_point.go @@ -0,0 +1,56 @@ +package orb + +// A MultiPoint represents a set of points in the 2D Eucledian or Cartesian plane. +type MultiPoint []Point + +// GeoJSONType returns the GeoJSON type for the object. +func (mp MultiPoint) GeoJSONType() string { + return "MultiPoint" +} + +// Dimensions returns 0 because a MultiPoint is a 0d object. +func (mp MultiPoint) Dimensions() int { + return 0 +} + +// Clone returns a new copy of the points. +func (mp MultiPoint) Clone() MultiPoint { + if mp == nil { + return nil + } + + points := make([]Point, len(mp)) + copy(points, mp) + + return MultiPoint(points) +} + +// Bound returns a bound around the points. Uses rectangular coordinates. +func (mp MultiPoint) Bound() Bound { + if len(mp) == 0 { + return emptyBound + } + + b := Bound{mp[0], mp[0]} + for _, p := range mp { + b = b.Extend(p) + } + + return b +} + +// Equal compares two MultiPoint objects. Returns true if lengths are the same +// and all points are Equal, and in the same order. 
+func (mp MultiPoint) Equal(multiPoint MultiPoint) bool { + if len(mp) != len(multiPoint) { + return false + } + + for i := range mp { + if !mp[i].Equal(multiPoint[i]) { + return false + } + } + + return true +} diff --git a/vendor/github.com/paulmach/orb/multi_polygon.go b/vendor/github.com/paulmach/orb/multi_polygon.go new file mode 100644 index 0000000..938a9a9 --- /dev/null +++ b/vendor/github.com/paulmach/orb/multi_polygon.go @@ -0,0 +1,56 @@ +package orb + +// MultiPolygon is a set of polygons. +type MultiPolygon []Polygon + +// GeoJSONType returns the GeoJSON type for the object. +func (mp MultiPolygon) GeoJSONType() string { + return "MultiPolygon" +} + +// Dimensions returns 2 because a MultiPolygon is a 2d object. +func (mp MultiPolygon) Dimensions() int { + return 2 +} + +// Bound returns a bound around the multi-polygon. +func (mp MultiPolygon) Bound() Bound { + if len(mp) == 0 { + return emptyBound + } + bound := mp[0].Bound() + for i := 1; i < len(mp); i++ { + bound = bound.Union(mp[i].Bound()) + } + + return bound +} + +// Equal compares two multi-polygons. +func (mp MultiPolygon) Equal(multiPolygon MultiPolygon) bool { + if len(mp) != len(multiPolygon) { + return false + } + + for i, p := range mp { + if !p.Equal(multiPolygon[i]) { + return false + } + } + + return true +} + +// Clone returns a new deep copy of the multi-polygon. +func (mp MultiPolygon) Clone() MultiPolygon { + if mp == nil { + return nil + } + + nmp := make(MultiPolygon, 0, len(mp)) + for _, p := range mp { + nmp = append(nmp, p.Clone()) + } + + return nmp +} diff --git a/vendor/github.com/paulmach/orb/point.go b/vendor/github.com/paulmach/orb/point.go new file mode 100644 index 0000000..c459c35 --- /dev/null +++ b/vendor/github.com/paulmach/orb/point.go @@ -0,0 +1,51 @@ +package orb + +// A Point is a Lon/Lat 2d point. +type Point [2]float64 + +var _ Pointer = Point{} + +// GeoJSONType returns the GeoJSON type for the object. +func (p Point) GeoJSONType() string { + return "Point" +} + +// Dimensions returns 0 because a point is a 0d object. +func (p Point) Dimensions() int { + return 0 +} + +// Bound returns a single point bound of the point. +func (p Point) Bound() Bound { + return Bound{p, p} +} + +// Point returns itself so it implements the Pointer interface. +func (p Point) Point() Point { + return p +} + +// Y returns the vertical coordinate of the point. +func (p Point) Y() float64 { + return p[1] +} + +// X returns the horizontal coordinate of the point. +func (p Point) X() float64 { + return p[0] +} + +// Lat returns the vertical, latitude coordinate of the point. +func (p Point) Lat() float64 { + return p[1] +} + +// Lon returns the horizontal, longitude coordinate of the point. +func (p Point) Lon() float64 { + return p[0] +} + +// Equal checks if the point represents the same point or vector. +func (p Point) Equal(point Point) bool { + return p[0] == point[0] && p[1] == point[1] +} diff --git a/vendor/github.com/paulmach/orb/polygon.go b/vendor/github.com/paulmach/orb/polygon.go new file mode 100644 index 0000000..b3e7d29 --- /dev/null +++ b/vendor/github.com/paulmach/orb/polygon.go @@ -0,0 +1,55 @@ +package orb + +// Polygon is a closed area. The first LineString is the outer ring. +// The others are the holes. Each LineString is expected to be closed +// ie. the first point matches the last. +type Polygon []Ring + +// GeoJSONType returns the GeoJSON type for the object. 
+func (p Polygon) GeoJSONType() string { + return "Polygon" +} + +// Dimensions returns 2 because a Polygon is a 2d object. +func (p Polygon) Dimensions() int { + return 2 +} + +// Bound returns a bound around the polygon. +func (p Polygon) Bound() Bound { + if len(p) == 0 { + return emptyBound + } + return p[0].Bound() +} + +// Equal compares two polygons. Returns true if lengths are the same +// and all points are Equal. +func (p Polygon) Equal(polygon Polygon) bool { + if len(p) != len(polygon) { + return false + } + + for i := range p { + if !p[i].Equal(polygon[i]) { + return false + } + } + + return true +} + +// Clone returns a new deep copy of the polygon. +// All of the rings are also cloned. +func (p Polygon) Clone() Polygon { + if p == nil { + return p + } + + np := make(Polygon, 0, len(p)) + for _, r := range p { + np = append(np, r.Clone()) + } + + return np +} diff --git a/vendor/github.com/paulmach/orb/ring.go b/vendor/github.com/paulmach/orb/ring.go new file mode 100644 index 0000000..5fe88ac --- /dev/null +++ b/vendor/github.com/paulmach/orb/ring.go @@ -0,0 +1,75 @@ +package orb + +// Ring represents a set of ring on the earth. +type Ring LineString + +// GeoJSONType returns the GeoJSON type for the object. +func (r Ring) GeoJSONType() string { + return "Polygon" +} + +// Dimensions returns 2 because a Ring is a 2d object. +func (r Ring) Dimensions() int { + return 2 +} + +// Closed will return true if the ring is a real ring. +// ie. 4+ points and the first and last points match. +// NOTE: this will not check for self-intersection. +func (r Ring) Closed() bool { + return (len(r) >= 4) && (r[0] == r[len(r)-1]) +} + +// Reverse changes the direction of the ring. +// This is done inplace, ie. it modifies the original data. +func (r Ring) Reverse() { + LineString(r).Reverse() +} + +// Bound returns a rect around the ring. Uses rectangular coordinates. +func (r Ring) Bound() Bound { + return MultiPoint(r).Bound() +} + +// Orientation returns 1 if the the ring is in couter-clockwise order, +// return -1 if the ring is the clockwise order and 0 if the ring is +// degenerate and had no area. +func (r Ring) Orientation() Orientation { + area := 0.0 + + // This is a fast planar area computation, which is okay for this use. + // implicitly move everything to near the origin to help with roundoff + offsetX := r[0][0] + offsetY := r[0][1] + for i := 1; i < len(r)-1; i++ { + area += (r[i][0]-offsetX)*(r[i+1][1]-offsetY) - + (r[i+1][0]-offsetX)*(r[i][1]-offsetY) + } + + if area > 0 { + return CCW + } + + if area < 0 { + return CW + } + + // degenerate case, no area + return 0 +} + +// Equal compares two rings. Returns true if lengths are the same +// and all points are Equal. +func (r Ring) Equal(ring Ring) bool { + return MultiPoint(r).Equal(MultiPoint(ring)) +} + +// Clone returns a new copy of the ring. +func (r Ring) Clone() Ring { + if r == nil { + return nil + } + + ps := MultiPoint(r) + return Ring(ps.Clone()) +} diff --git a/vendor/github.com/paulmach/orb/round.go b/vendor/github.com/paulmach/orb/round.go new file mode 100644 index 0000000..b6c7799 --- /dev/null +++ b/vendor/github.com/paulmach/orb/round.go @@ -0,0 +1,100 @@ +package orb + +import ( + "fmt" + "math" +) + +// Round will round all the coordinates of the geometry to the given factor. +// The default is 6 decimal places. 
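A quick sketch of orb.Round above; the input coordinates are arbitrary, and the default factor is documented as six decimal places:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
)

func main() {
	p := orb.Point{-122.41941672, 37.77492951}

	// Default precision (documented above as 6 decimal places).
	fmt.Println(orb.Round(p))

	// An explicit factor of 100 keeps 2 decimal places.
	fmt.Println(orb.Round(p, 100)) // [-122.42 37.77]
}
```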
+func Round(g Geometry, factor ...int) Geometry { + if g == nil { + return nil + } + + f := float64(DefaultRoundingFactor) + if len(factor) > 0 { + f = float64(factor[0]) + } + + switch g := g.(type) { + case Point: + return Point{ + math.Round(g[0]*f) / f, + math.Round(g[1]*f) / f, + } + case MultiPoint: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case LineString: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case MultiLineString: + if g == nil { + return nil + } + for _, ls := range g { + roundPoints([]Point(ls), f) + } + return g + case Ring: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case Polygon: + if g == nil { + return nil + } + for _, r := range g { + roundPoints([]Point(r), f) + } + return g + case MultiPolygon: + if g == nil { + return nil + } + for _, p := range g { + for _, r := range p { + roundPoints([]Point(r), f) + } + } + return g + case Collection: + if g == nil { + return nil + } + + for i := range g { + g[i] = Round(g[i], int(f)) + } + return g + case Bound: + return Bound{ + Min: Point{ + math.Round(g.Min[0]*f) / f, + math.Round(g.Min[1]*f) / f, + }, + Max: Point{ + math.Round(g.Max[0]*f) / f, + math.Round(g.Max[1]*f) / f, + }, + } + } + + panic(fmt.Sprintf("geometry type not supported: %T", g)) +} + +func roundPoints(ps []Point, f float64) { + for i := range ps { + ps[i][0] = math.Round(ps[i][0]*f) / f + ps[i][1] = math.Round(ps[i][1]*f) / f + } +} diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/communication.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/communication.go index 80c2890..c1d6809 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/communication.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/communication.go @@ -1,12 +1,16 @@ package tilepack +import ( + "github.com/paulmach/orb/maptile" +) + type TileRequest struct { - Tile *Tile + Tile maptile.Tile URL string } type TileResponse struct { - Tile *Tile + Tile maptile.Tile Data []byte Elapsed float64 } diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/disk_outputter.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/disk_outputter.go index d311be1..82b9df6 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/disk_outputter.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/disk_outputter.go @@ -7,6 +7,7 @@ import ( "path/filepath" "github.com/aaronland/go-string/dsn" + "github.com/paulmach/orb/maptile" ) type diskOutputter struct { @@ -73,7 +74,7 @@ func (o *diskOutputter) CreateTiles() error { return nil } -func (o *diskOutputter) Save(tile *Tile, data []byte) error { +func (o *diskOutputter) Save(tile maptile.Tile, data []byte) error { relPath := fmt.Sprintf("%d/%d/%d.%s", tile.Z, tile.X, tile.Y, o.format) absPath := filepath.Join(o.root, relPath) @@ -91,13 +92,11 @@ func (o *diskOutputter) Save(tile *Tile, data []byte) error { } fh, err := os.OpenFile(absPath, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { return err } _, err = fh.Write(data) - if err != nil { fh.Close() return err diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/http_job_creator.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/http_job_creator.go index b0597cc..cec3614 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/http_job_creator.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/http_job_creator.go @@ -6,13 +6,15 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math/rand" "net/http" "os" "strings" "time" + + "github.com/paulmach/orb" + 
"github.com/paulmach/orb/maptile" ) type HTTPError struct { @@ -32,7 +34,7 @@ const ( httpUserAgent = "go-tilepacks/1.0" ) -func NewXYZJobGenerator(urlTemplate string, bounds *LngLatBbox, zooms []uint, httpTimeout time.Duration, invertedY bool, ensureGzip bool) (JobGenerator, error) { +func NewXYZJobGenerator(urlTemplate string, bounds orb.Bound, zooms []maptile.Zoom, httpTimeout time.Duration, invertedY bool, ensureGzip bool) (JobGenerator, error) { // Configure the HTTP client with a timeout and connection pools httpClient := &http.Client{} httpClient.Timeout = httpTimeout @@ -52,7 +54,7 @@ func NewXYZJobGenerator(urlTemplate string, bounds *LngLatBbox, zooms []uint, ht }, nil } -func NewFileTransportXYZJobGenerator(root string, urlTemplate string, bounds *LngLatBbox, zooms []uint, httpTimeout time.Duration, invertedY bool, ensureGzip bool) (JobGenerator, error) { +func NewFileTransportXYZJobGenerator(root string, urlTemplate string, bounds orb.Bound, zooms []maptile.Zoom, httpTimeout time.Duration, invertedY bool, ensureGzip bool) (JobGenerator, error) { info, err := os.Stat(root) @@ -84,8 +86,8 @@ func NewFileTransportXYZJobGenerator(root string, urlTemplate string, bounds *Ln type xyzJobGenerator struct { httpClient *http.Client urlTemplate string - bounds *LngLatBbox - zooms []uint + bounds orb.Bound + zooms []maptile.Zoom invertedY bool ensureGzip bool } @@ -155,11 +157,11 @@ func (x *xyzJobGenerator) CreateWorker() (func(id int, jobs chan *TileRequest, r switch contentEncoding { case "gzip": // If the server reports content encoding of gzip, we can just copy the bytes as-is - bodyData, err = ioutil.ReadAll(resp.Body) + bodyData, err = io.ReadAll(resp.Body) default: if !x.ensureGzip { - bodyData, err = ioutil.ReadAll(resp.Body) + bodyData, err = io.ReadAll(resp.Body) } else { // Otherwise we'll gzip the data, so we should @@ -179,7 +181,7 @@ func (x *xyzJobGenerator) CreateWorker() (func(id int, jobs chan *TileRequest, r continue } - bodyData, err = ioutil.ReadAll(bodyBuffer) + bodyData, err = io.ReadAll(bodyBuffer) } if err != nil { @@ -211,7 +213,7 @@ func (x *xyzJobGenerator) CreateWorker() (func(id int, jobs chan *TileRequest, r } func (x *xyzJobGenerator) CreateJobs(jobs chan *TileRequest) error { - consumer := func(tile *Tile) { + consumer := func(tile maptile.Tile) { url := strings.NewReplacer( "{x}", fmt.Sprintf("%d", tile.X), "{y}", fmt.Sprintf("%d", tile.Y), diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_outputter.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_outputter.go index 4f7e7e2..6bddd53 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_outputter.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_outputter.go @@ -4,29 +4,28 @@ import ( "crypto/md5" "database/sql" "encoding/hex" + "math" _ "github.com/mattn/go-sqlite3" // Register sqlite3 database driver + "github.com/paulmach/orb/maptile" ) -const ( - batchSize = 1000 -) - -func NewMbtilesOutputter(dsn string) (*mbtilesOutputter, error) { +func NewMbtilesOutputter(dsn string, batchSize int) (*mbtilesOutputter, error) { db, err := sql.Open("sqlite3", dsn) if err != nil { return nil, err } - return &mbtilesOutputter{db: db}, nil + return &mbtilesOutputter{db: db, batchSize: batchSize}, nil } type mbtilesOutputter struct { TileOutputter db *sql.DB txn *sql.Tx - batchCount int hasTiles bool + batchCount int + batchSize int } func (o *mbtilesOutputter) Close() error { @@ -85,7 +84,7 @@ func (o *mbtilesOutputter) CreateTiles() error { return nil } -func 
(o *mbtilesOutputter) Save(tile *Tile, data []byte) error { +func (o *mbtilesOutputter) Save(tile maptile.Tile, data []byte) error { if err := o.CreateTiles(); err != nil { return err } @@ -101,19 +100,21 @@ func (o *mbtilesOutputter) Save(tile *Tile, data []byte) error { hash := md5.Sum(data) tileID := hex.EncodeToString(hash[:]) + invertedY := uint32(math.Pow(2.0, float64(tile.Z))) - 1 - tile.Y + _, err := o.txn.Exec("INSERT OR REPLACE INTO images (tile_id, tile_data) VALUES (?, ?);", tileID, data) if err != nil { return err } - _, err = o.txn.Exec("INSERT OR REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?);", tile.Z, tile.X, tile.Y, tileID) + _, err = o.txn.Exec("INSERT OR REPLACE INTO map (zoom_level, tile_column, tile_row, tile_id) VALUES (?, ?, ?, ?);", tile.Z, tile.X, invertedY, tileID) if err != nil { return err } o.batchCount++ - if o.batchCount%batchSize == 0 { + if o.batchCount%o.batchSize == 0 { err := o.txn.Commit() if err != nil { return err diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_reader.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_reader.go index 386b2e8..dae312e 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_reader.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/mbtiles_reader.go @@ -5,17 +5,18 @@ import ( "log" _ "github.com/mattn/go-sqlite3" // Register sqlite3 database driver + "github.com/paulmach/orb/maptile" ) type TileData struct { - Tile *Tile + Tile maptile.Tile Data *[]byte } type MbtilesReader interface { Close() error - GetTile(tile *Tile) (*TileData, error) - VisitAllTiles(visitor func(*Tile, []byte)) error + GetTile(tile maptile.Tile) (*TileData, error) + VisitAllTiles(visitor func(maptile.Tile, []byte)) error } type tileDataFromDatabase struct { @@ -53,7 +54,7 @@ func (o *mbtilesReader) Close() error { } // GetTile returns data for the given tile. -func (o *mbtilesReader) GetTile(tile *Tile) (*TileData, error) { +func (o *mbtilesReader) GetTile(tile maptile.Tile) (*TileData, error) { var data []byte result := o.db.QueryRow("SELECT tile_data FROM tiles WHERE zoom_level=? AND tile_column=? AND tile_row=? LIMIT 1", tile.Z, tile.X, tile.Y) @@ -76,13 +77,14 @@ func (o *mbtilesReader) GetTile(tile *Tile) (*TileData, error) { } // VisitAllTiles runs the given function on all tiles in this mbtiles archive. 
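The updated Save converts the incoming XYZ tile row to the TMS row that the MBTiles spec stores in the map/tiles tables (2^z - 1 - y) before inserting. Below is a minimal sketch of that flip, assuming the vendored github.com/paulmach/orb/maptile package; the tmsRow helper and the sample coordinates are invented for illustration and are not part of the library:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb/maptile"
)

// tmsRow converts an XYZ (slippy-map) tile row into the TMS row that the
// MBTiles map/tiles tables store: row' = 2^z - 1 - row. This mirrors the
// invertedY calculation added to Save above.
func tmsRow(t maptile.Tile) uint32 {
	return (uint32(1) << uint32(t.Z)) - 1 - t.Y
}

func main() {
	t := maptile.New(5242, 12663, 15) // arbitrary z15 tile
	fmt.Printf("z=%d x=%d xyz_y=%d tms_y=%d\n", t.Z, t.X, t.Y, tmsRow(t))
}
```

Note that, as written in these hunks, the flip is applied only on the write path; GetTile and VisitAllTiles pass the stored tile_row through unchanged.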
-func (o *mbtilesReader) VisitAllTiles(visitor func(*Tile, []byte)) error { +func (o *mbtilesReader) VisitAllTiles(visitor func(maptile.Tile, []byte)) error { rows, err := o.db.Query("SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles") if err != nil { return err } - var z, x, y uint + var x, y uint32 + var z maptile.Zoom for rows.Next() { data := []byte{} err := rows.Scan(&z, &x, &y, &data) @@ -90,7 +92,7 @@ func (o *mbtilesReader) VisitAllTiles(visitor func(*Tile, []byte)) error { log.Printf("Couldn't scan row: %+v", err) } - t := &Tile{Z: z, X: x, Y: y} + t := maptile.New(x, y, z) visitor(t, data) } return nil diff --git a/vendor/github.com/tilezen/go-tilepacks/tilepack/metatile_job_creator.go b/vendor/github.com/tilezen/go-tilepacks/tilepack/metatile_job_creator.go index 570dff5..01fb390 100644 --- a/vendor/github.com/tilezen/go-tilepacks/tilepack/metatile_job_creator.go +++ b/vendor/github.com/tilezen/go-tilepacks/tilepack/metatile_job_creator.go @@ -7,7 +7,7 @@ import ( "crypto/md5" "encoding/hex" "fmt" - "io/ioutil" + "io" "log" "math" "strings" @@ -16,6 +16,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/paulmach/orb" + "github.com/paulmach/orb/maptile" ) const ( @@ -26,7 +28,7 @@ func log2Uint(size uint) uint { return uint(math.Log2(float64(size))) } -func NewMetatileJobGenerator(bucket string, pathTemplate string, layerName string, metatileSize uint, maxDetailZoom uint, zooms []uint, bounds *LngLatBbox) (JobGenerator, error) { +func NewMetatileJobGenerator(bucket string, requesterPays bool, pathTemplate string, layerName string, format string, metatileSize uint, maxDetailZoom maptile.Zoom, zooms []maptile.Zoom, bounds orb.Bound) (JobGenerator, error) { sess, err := session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, }) @@ -34,29 +36,39 @@ func NewMetatileJobGenerator(bucket string, pathTemplate string, layerName strin return nil, err } - downloader := s3manager.NewDownloader(sess) + downloader := s3manager.NewDownloader( + sess, + func(downloader *s3manager.Downloader) { + // See https://levyeran.medium.com/high-memory-allocations-and-gc-cycles-while-downloading-large-s3-objects-using-the-aws-sdk-for-go-e776a136c5d0 + downloader.BufferProvider = s3manager.NewPooledBufferedWriterReadFromProvider(15 * 1024 * 1024) + }, + ) return &metatileJobGenerator{ s3Client: downloader, bucket: bucket, + requesterPays: requesterPays, pathTemplate: pathTemplate, layerName: layerName, metatileSize: metatileSize, maxDetailZoom: maxDetailZoom, bounds: bounds, zooms: zooms, + format: format, }, nil } type metatileJobGenerator struct { s3Client *s3manager.Downloader bucket string + requesterPays bool pathTemplate string layerName string metatileSize uint - maxDetailZoom uint - bounds *LngLatBbox - zooms []uint + maxDetailZoom maptile.Zoom + bounds orb.Bound + zooms []maptile.Zoom + format string } func (x *metatileJobGenerator) CreateWorker() (func(id int, jobs chan *TileRequest, results chan *TileResponse), error) { @@ -68,10 +80,16 @@ func (x *metatileJobGenerator) CreateWorker() (func(id int, jobs chan *TileReque for metaTileRequest := range jobs { // Download the metatile archive zip to a byte buffer compressedBytes := &aws.WriteAtBuffer{} - numBytes, err := x.s3Client.Download(compressedBytes, &s3.GetObjectInput{ + input := &s3.GetObjectInput{ Bucket: aws.String(x.bucket), Key: aws.String(metaTileRequest.URL), - }) + } + + if 
x.requesterPays { + input.RequestPayer = aws.String("requester") + } + + numBytes, err := x.s3Client.Download(compressedBytes, input) if err != nil { log.Printf("Unable to download item s3://%s/%s: %+v", x.bucket, metaTileRequest.URL, err) continue @@ -88,24 +106,29 @@ func (x *metatileJobGenerator) CreateWorker() (func(id int, jobs chan *TileReque // Iterate over the contents of the zip and add them as TileResponses for _, zf := range zippedReader.File { - var offsetZ, offsetX, offsetY uint - // TODO Pull in the format too? - if n, err := fmt.Sscanf(zf.Name, "%d/%d/%d.mvt", &offsetZ, &offsetX, &offsetY); err != nil || n != 3 { + var offsetZ, offsetX, offsetY uint32 + var format string + if n, err := fmt.Sscanf(zf.Name, "%d/%d/%d.%s", &offsetZ, &offsetX, &offsetY, &format); err != nil || n != 4 { log.Fatalf("Couldn't scan metatile name") } - // Add the offset to metatile to get the actual tile - t := &Tile{ - Z: metaTileRequest.Tile.Z + offsetZ, - X: (metaTileRequest.Tile.X << offsetZ) + offsetX, - Y: (metaTileRequest.Tile.Y << offsetZ) + offsetY, + // Skip formats we don't care about + if format != x.format { + continue } + // Add the offset to metatile to get the actual tile + t := maptile.New( + (metaTileRequest.Tile.X< b.South) && (o.South < b.North) - lngOverlaps := (o.East > b.West) && (o.West < b.East) - return latOverlaps && lngOverlaps -} - -//Bbox holds Spherical Mercator bounding box of a tile -type Bbox struct { - Left, Bottom, Right, Top float64 -} - -//XY holds a Spherical Mercator point -type XY struct { - X, Y float64 -} - -func deg2rad(deg float64) float64 { - return deg * (math.Pi / oneEighty) -} - -func rad2deg(rad float64) float64 { - return rad * (oneEighty / math.Pi) -} - -func min(a uint, b uint) uint { - if a < b { - return a - } - return b -} - -func pow(a, b int) int { - result := 1 - - for 0 != b { - if 0 != (b & 1) { - result *= a +type GenerateBoxesConsumerFunc func(ll maptile.Tile, ur maptile.Tile, z maptile.Zoom) - } - b >>= 1 - a *= a - } - - return result +type GenerateRangesOptions struct { + Bounds orb.Bound + Zooms []maptile.Zoom + ConsumerFunc GenerateBoxesConsumerFunc } -// GetTile returns a tile for a given longitude latitude and zoom level -func GetTile(lng float64, lat float64, zoom uint) *Tile { - - latRad := deg2rad(lat) - n := math.Pow(2.0, float64(zoom)) - x := uint(math.Floor((lng + oneEighty) / threeSixty * n)) - y := uint(math.Floor((1.0 - math.Log(math.Tan(latRad)+(1.0/math.Cos(latRad)))/math.Pi) / 2.0 * n)) - - return &Tile{x, y, zoom} +type GenerateTilesConsumerFunc func(tile maptile.Tile) +type GenerateTilesOptions struct { + Bounds orb.Bound + Zooms []maptile.Zoom + ConsumerFunc GenerateTilesConsumerFunc + InvertedY bool } -func GenerateTiles(opts *GenerateTilesOptions) { - +func GenerateTileRanges(opts *GenerateRangesOptions) { bounds := opts.Bounds zooms := opts.Zooms consumer := opts.ConsumerFunc - var boxes []*LngLatBbox - if bounds.West > bounds.East { - boxes = []*LngLatBbox{ - &LngLatBbox{-180.0, bounds.South, bounds.East, bounds.North}, - &LngLatBbox{bounds.West, bounds.South, 180.0, bounds.North}, + var boxes []orb.Bound + if bounds.Min.X() > bounds.Max.X() { + boxes = []orb.Bound{ + { + Min: orb.Point{-180.0, bounds.Min.Y()}, + Max: bounds.Max, + }, + { + Min: bounds.Min, + Max: orb.Point{180.0, bounds.Max.Y()}, + }, } } else { - boxes = []*LngLatBbox{bounds} + boxes = []orb.Bound{bounds} } for _, box := range boxes { // Clamp the individual boxes to web mercator limits - clampedBox := &LngLatBbox{ - West: math.Max(-180.0, 
box.West), - South: math.Max(-webMercatorLatLimit, box.South), - East: math.Min(180.0, box.East), - North: math.Min(webMercatorLatLimit, box.North), + clampedBox := orb.Bound{ + Min: orb.Point{ + math.Max(-180.0, box.Min.X()), + math.Max(-webMercatorLatLimit, box.Min.Y()), + }, + Max: orb.Point{ + math.Min(180.0-0.00000001, box.Max.X()), + math.Min(webMercatorLatLimit, box.Max.Y()), + }, } for _, z := range zooms { + minTile := maptile.At(clampedBox.Min, z) + maxTile := maptile.At(clampedBox.Max, z) - ll := GetTile(clampedBox.West, clampedBox.South, z) - ur := GetTile(clampedBox.East, clampedBox.North, z) - - llx := ll.X - if llx < 0 { - llx = 0 - } - - ury := ur.Y - if ury < 0 { - ury = 0 - } + // Flip Y because the XYZ tiling scheme has an inverted Y compared to lat/lon + maxTile.Y, minTile.Y = minTile.Y, maxTile.Y - for i := llx; i < min(ur.X+1, 1< float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return emptyValue, fmt.Errorf("%d overflows int8", i64) + } + + return reflect.ValueOf(int8(i64)), nil + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return emptyValue, fmt.Errorf("%d overflows int16", i64) + } + + return reflect.ValueOf(int16(i64)), nil + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return emptyValue, fmt.Errorf("%d overflows int32", i64) + } + + return reflect.ValueOf(int32(i64)), nil + case reflect.Int64: + return reflect.ValueOf(i64), nil + case reflect.Int: + if int64(int(i64)) != i64 { // Can we fit this inside of an int + return emptyValue, fmt.Errorf("%d overflows int", i64) + } + + return reflect.ValueOf(int(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.Zero(t), + } + } +} + +// IntDecodeValue is the ValueDecoderFunc for int types. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + elem, err := dvd.intDecodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetInt(elem.Int()) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +// +// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. 
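GenerateTileRanges above derives the corner tiles for each zoom directly from maptile.At on the clamped orb.Bound, replacing the removed GetTile helper, and then swaps the rows because XYZ Y grows southward. The sketch below shows that corner computation in isolation, assuming the vendored orb and maptile packages; the bounding box values are invented:

```go
package main

import (
	"fmt"

	"github.com/paulmach/orb"
	"github.com/paulmach/orb/maptile"
)

func main() {
	// Hypothetical lon/lat bounding box: Min is the south-west corner,
	// Max the north-east corner.
	b := orb.Bound{
		Min: orb.Point{-123.0, 37.0},
		Max: orb.Point{-121.0, 38.5},
	}

	z := maptile.Zoom(10)

	// maptile.At returns the tile containing a lon/lat point at zoom z.
	sw := maptile.At(b.Min, z)
	ne := maptile.At(b.Max, z)

	// XYZ rows grow southward, so the northern corner has the smaller Y;
	// swap rows the same way GenerateTileRanges does.
	minY, maxY := ne.Y, sw.Y

	fmt.Printf("z=%d x:[%d..%d] y:[%d..%d]\n", z, sw.X, ne.X, minY, maxY)
}
```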
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var f float64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return emptyValue, err + } + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + f = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) + } + + switch t.Kind() { + case reflect.Float32: + if !ec.Truncate && float64(float32(f)) != f { + return emptyValue, errCannotTruncate + } + + return reflect.ValueOf(float32(f)), nil + case reflect.Float64: + return reflect.ValueOf(f), nil + default: + return emptyValue, ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.Zero(t), + } + } +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. 
+func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: val, + } + } + + elem, err := dvd.floatDecodeType(ec, vr, val.Type()) + if err != nil { + return err + } + + val.SetFloat(elem.Float()) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +// +// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJavaScript { + return emptyValue, ValueDecoderError{ + Name: "JavaScriptDecodeValue", + Types: []reflect.Type{tJavaScript}, + Received: reflect.Zero(t), + } + } + + var js string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.JavaScript: + js, err = vr.ReadJavascript() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.JavaScript(js)), nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. 
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tSymbol { + return emptyValue, ValueDecoderError{ + Name: "SymbolDecodeValue", + Types: []reflect.Type{tSymbol}, + Received: reflect.Zero(t), + } + } + + var symbol string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + symbol, err = vr.ReadString() + case bsontype.Symbol: + symbol, err = vr.ReadSymbol() + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} + } + symbol = string(data) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Symbol(symbol)), nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tBinary { + return emptyValue, ValueDecoderError{ + Name: "BinaryDecodeValue", + Types: []reflect.Type{tBinary}, + Received: reflect.Zero(t), + } + } + + var data []byte + var subtype byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Binary: + data, subtype, err = vr.ReadBinary() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. 
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tBinary { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + + elem, err := dvd.binaryDecodeType(dc, vr, tBinary) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tUndefined { + return emptyValue, ValueDecoderError{ + Name: "UndefinedDecodeValue", + Types: []reflect.Type{tUndefined}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Undefined{}), nil +} + +// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tUndefined { + return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// Accept both 12-byte string and pretty-printed 24-byte hex string formats. +func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tOID { + return emptyValue, ValueDecoderError{ + Name: "ObjectIDDecodeValue", + Types: []reflect.Type{tOID}, + Received: reflect.Zero(t), + } + } + + var oid primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.ObjectID: + oid, err = vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + if oid, err = primitive.ObjectIDFromHex(str); err == nil { + break + } + if len(str) != 12 { + return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) + } + byteArr := []byte(str) + copy(oid[:], byteArr) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) + } + + return reflect.ValueOf(oid), nil +} + +// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. 
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tOID { + return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} + } + + elem, err := dvd.objectIDDecodeType(dc, vr, tOID) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDateTime { + return emptyValue, ValueDecoderError{ + Name: "DateTimeDecodeValue", + Types: []reflect.Type{tDateTime}, + Received: reflect.Zero(t), + } + } + + var dt int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err = vr.ReadDateTime() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DateTime(dt)), nil +} + +// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDateTime { + return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tNull { + return emptyValue, ValueDecoderError{ + Name: "NullDecodeValue", + Types: []reflect.Type{tNull}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Null{}), nil +} + +// NullDecodeValue is the ValueDecoderFunc for Null. +func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tNull { + return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + elem, err := dvd.nullDecodeType(dc, vr, tNull) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tRegex { + return emptyValue, ValueDecoderError{ + Name: "RegexDecodeValue", + Types: []reflect.Type{tRegex}, + Received: reflect.Zero(t), + } + } + + var pattern, options string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Regex: + pattern, options, err = vr.ReadRegex() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil +} + +// RegexDecodeValue is the ValueDecoderFunc for Regex. 
+func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tRegex { + return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + elem, err := dvd.regexDecodeType(dc, vr, tRegex) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDBPointer { + return emptyValue, ValueDecoderError{ + Name: "DBPointerDecodeValue", + Types: []reflect.Type{tDBPointer}, + Received: reflect.Zero(t), + } + } + + var ns string + var pointer primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DBPointer: + ns, pointer, err = vr.ReadDBPointer() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil +} + +// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDBPointer { + return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { + if reflectType != tTimestamp { + return emptyValue, ValueDecoderError{ + Name: "TimestampDecodeValue", + Types: []reflect.Type{tTimestamp}, + Received: reflect.Zero(reflectType), + } + } + + var t, incr uint32 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Timestamp: + t, incr, err = vr.ReadTimestamp() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil +} + +// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. 
+func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTimestamp { + return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMinKey { + return emptyValue, ValueDecoderError{ + Name: "MinKeyDecodeValue", + Types: []reflect.Type{tMinKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MinKey: + err = vr.ReadMinKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MinKey{}), nil +} + +// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMinKey { + return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMaxKey { + return emptyValue, ValueDecoderError{ + Name: "MaxKeyDecodeValue", + Types: []reflect.Type{tMaxKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MaxKey: + err = vr.ReadMaxKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MaxKey{}), nil +} + +// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMaxKey { + return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDecimal { + return emptyValue, ValueDecoderError{ + Name: "Decimal128DecodeValue", + Types: []reflect.Type{tDecimal}, + Received: reflect.Zero(t), + } + } + + var d128 primitive.Decimal128 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Decimal128: + d128, err = vr.ReadDecimal128() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(d128), nil +} + +// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. 
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDecimal { + return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + + elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJSONNumber { + return emptyValue, ValueDecoderError{ + Name: "JSONNumberDecodeValue", + Types: []reflect.Type{tJSONNumber}, + Received: reflect.Zero(t), + } + } + + var jsonNum json.Number + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(i64, 10)) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(jsonNum), nil +} + +// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJSONNumber { + return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + + elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tURL { + return emptyValue, ValueDecoderError{ + Name: "URLDecodeValue", + Types: []reflect.Type{tURL}, + Received: reflect.Zero(t), + } + } + + urlPtr := &url.URL{} + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + var str string // Declare str here to avoid shadowing err during the ReadString call. + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + + urlPtr, err = url.Parse(str) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(urlPtr).Elem(), nil +} + +// URLDecodeValue is the ValueDecoderFunc for url.URL. +func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tURL { + return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} + } + + elem, err := dvd.urlDecodeType(dc, vr, tURL) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// TimeDecodeValue is the ValueDecoderFunc for time.Time. +// +// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. 
+func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.DateTime { + return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) + } + + dt, err := vr.ReadDateTime() + if err != nil { + return err + } + + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) + return nil +} + +// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. +// +// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { + return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) + } + + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != 0x00 { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) + } + + val.Set(reflect.ValueOf(data)) + return nil +} + +// MapDecodeValue is the ValueDecoderFunc for map[string]* types. +// +// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) + } + return nil +} + +// ArrayDecodeValue is the ValueDecoderFunc for array types. 
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if len(data) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) + } + + for idx, elem := range data { + val.Index(idx).Set(reflect.ValueOf(elem)) + } + return nil + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into an array", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if len(elems) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) + } + + for idx, elem := range elems { + val.Index(idx).Set(elem) + } + + return nil +} + +// SliceDecodeValue is the ValueDecoderFunc for slice types. +// +// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vr.Type() { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + default: + return fmt.Errorf("cannot decode %v into a slice", vr.Type()) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} + +// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tValueUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") + errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] + if !errVal.IsNil() { + return errVal.Interface().(error) + } + return nil +} + +// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". + if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") + errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] + if !errVal.IsNil() { + return errVal.Interface().(error) + } + return nil +} + +// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. 
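ValueUnmarshalerDecodeValue above hands the raw BSON value to any type implementing UnmarshalBSONValue, so application types can take over their own decoding. A hedged sketch of that hook follows; UpperString and the "layer" field are hypothetical, while bson.ValueUnmarshaler, bson.RawValue, bson.Marshal and bson.Unmarshal are the driver's public API:

```go
package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// UpperString normalises incoming BSON strings. Because *UpperString
// implements bson.ValueUnmarshaler, decoding is routed through
// ValueUnmarshalerDecodeValue instead of the default string codec.
type UpperString string

func (u *UpperString) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	var s string
	rv := bson.RawValue{Type: t, Value: data}
	if err := rv.Unmarshal(&s); err != nil {
		return err
	}
	*u = UpperString(strings.ToUpper(s))
	return nil
}

func main() {
	doc, _ := bson.Marshal(bson.M{"layer": "terrain"})

	var out struct {
		Layer UpperString `bson:"layer"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Layer) // TERRAIN
}
```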
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + idx := 0 + for { + vr, err := ar.ReadValue() + if err == bsonrw.ErrEOA { + break + } + if err != nil { + return nil, err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return nil, newDecodeError(strconv.Itoa(idx), err) + } + elems = append(elems, elem) + idx++ + } + + return elems, nil +} + +func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { + var cws primitive.CodeWithScope + + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return cws, err + } + + scope := reflect.New(tD).Elem() + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return cws, err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + cws = primitive.CodeWithScope{ + Code: primitive.JavaScript(code), + Scope: scope.Interface().(primitive.D), + } + return cws, nil +} + +func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tCodeWithScope { + return emptyValue, ValueDecoderError{ + Name: "CodeWithScopeDecodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.Zero(t), + } + } + + var cws primitive.CodeWithScope + var err error + switch vrType := vr.Type(); vrType { + case bsontype.CodeWithScope: + cws, err = dvd.readCodeWithScope(dc, vr) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) + } + if err != nil { + return emptyValue, err + } + + return 
reflect.ValueOf(cws), nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, newDecodeError(key, err) + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 0000000..6bdb43c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,766 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var errInvalidValue = errors.New("cannot encode invalid element") + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. 
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) + } + rb. + RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeEncoder(tTime, defaultTimeCodec). + RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeEncoder(tCoreArray, defaultArrayCodec). + RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). + RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). + RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). + RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). + RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). + RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). + RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). + RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). + RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). + RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). + RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). + RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). + RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). + RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). + RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). + RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). + RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). + RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). + RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). + RegisterDefaultEncoder(reflect.Map, defaultMapCodec). + RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultEncoder(reflect.String, defaultStringCodec). + RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). + RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). + RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). + RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) +} + +// BooleanEncodeValue is the ValueEncoderFunc for bool types. 
+func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Bool { + return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + return vw.WriteBoolean(val.Bool()) +} + +func fitsIn32Bits(i int64) bool { + return math.MinInt32 <= i && i <= math.MaxInt32 +} + +// IntEncodeValue is the ValueEncoderFunc for int types. +func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return vw.WriteInt32(int32(val.Int())) + case reflect.Int: + i64 := val.Int() + if fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + case reflect.Int64: + i64 := val.Int() + if ec.MinSize && fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + } + + return ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } +} + +// UintEncodeValue is the ValueEncoderFunc for uint types. +// +// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. +func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + if ec.MinSize && u64 <= math.MaxInt32 { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// FloatEncodeValue is the ValueEncoderFunc for float types. +func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Float32, reflect.Float64: + return vw.WriteDouble(val.Float()) + } + + return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} +} + +// StringEncodeValue is the ValueEncoderFunc for string types. +// +// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. +func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tOID { + return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} + } + return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) +} + +// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. 
+func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDecimal {
+		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+//
+// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+//
+// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+//
+// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+ err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return dve.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. +func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + if collisionFn != nil && collisionFn(key.String()) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(key.String()) + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// ArrayEncodeValue is the ValueEncoderFunc for array types. +func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().Elem() == tE { + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + e := val.Index(idx).Interface().(primitive.E) + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// SliceEncodeValue is the ValueEncoderFunc for slice types. +// +// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. 
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { + if origEncoder != nil || (currVal.Kind() != reflect.Interface) { + return origEncoder, currVal, nil + } + currVal = currVal.Elem() + if !currVal.IsValid() { + return nil, currVal, errInvalidValue + } + currEncoder, err := ec.LookupEncoder(currVal.Type()) + + return currEncoder, currVal, err +} + +// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. 
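+//
+// Editorial sketch (not part of the vendored source): a caller-defined type opts into this hook by
+// implementing MarshalBSONValue. Assuming a hypothetical Wrapper type and the driver's top-level
+// bson.MarshalValue helper, that could look like
+//
+//	func (w Wrapper) MarshalBSONValue() (bsontype.Type, []byte, error) {
+//		return bson.MarshalValue(w.Inner) // Wrapper and Inner are illustrative names
+//	}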
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement ValueMarshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + case val.Type().Implements(tValueMarshaler): + // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tValueMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + } + + fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") + returns := fn.Call(nil) + if !returns[2].IsNil() { + return returns[2].Interface().(error) + } + t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) +} + +// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. +func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Marshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + case val.Type().Implements(tMarshaler): + // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + } + + fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) +} + +// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Proxy + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + case val.Type().Implements(tProxy): + // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tProxy) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + } + + fn := val.Convert(tProxy).MethodByName("ProxyBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0] + var encoder ValueEncoder + var err error + if data.Elem().IsValid() { + encoder, err = ec.LookupEncoder(data.Elem().Type()) + } else { + encoder, err = ec.LookupEncoder(nil) + } + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, data.Elem()) +} + +// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. 
+func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJavaScript { + return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + return vw.WriteJavascript(val.String()) +} + +// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. +func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tSymbol { + return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + return vw.WriteSymbol(val.String()) +} + +// BinaryEncodeValue is the ValueEncoderFunc for Binary. +func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tBinary { + return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + b := val.Interface().(primitive.Binary) + + return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) +} + +// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. +func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tUndefined { + return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + return vw.WriteUndefined() +} + +// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. +func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDateTime { + return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + return vw.WriteDateTime(val.Int()) +} + +// NullEncodeValue is the ValueEncoderFunc for Null. +func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tNull { + return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + return vw.WriteNull() +} + +// RegexEncodeValue is the ValueEncoderFunc for Regex. +func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tRegex { + return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + regex := val.Interface().(primitive.Regex) + + return vw.WriteRegex(regex.Pattern, regex.Options) +} + +// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. +func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDBPointer { + return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + dbp := val.Interface().(primitive.DBPointer) + + return vw.WriteDBPointer(dbp.DB, dbp.Pointer) +} + +// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. 
+func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTimestamp { + return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + ts := val.Interface().(primitive.Timestamp) + + return vw.WriteTimestamp(ts.T, ts.I) +} + +// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. +func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMinKey { + return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + return vw.WriteMinKey() +} + +// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. +func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMaxKey { + return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + return vw.WriteMaxKey() +} + +// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. +func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreDocument { + return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + cdoc := val.Interface().(bsoncore.Document) + + return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) +} + +// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCodeWithScope { + return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + cws := val.Interface().(primitive.CodeWithScope) + + dw, err := vw.WriteCodeWithScope(string(cws.Code)) + if err != nil { + return err + } + + sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) + defer sliceWriterPool.Put(sw) + *sw = (*sw)[:0] + + scopeVW := bvwPool.Get(sw) + defer bvwPool.Put(scopeVW) + + encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) + if err != nil { + return err + } + + err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) + if err != nil { + return err + } + return dw.WriteDocumentEnd() +} + +// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type +func isImplementationNil(val reflect.Value, inter reflect.Type) bool { + vt := val.Type() + for vt.Kind() == reflect.Ptr { + vt = vt.Elem() + } + return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go new file mode 100644 index 0000000..5f903eb --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -0,0 +1,90 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncodec provides a system for encoding values to BSON representations and decoding +// values from BSON representations. This package considers both binary BSON and ExtendedJSON as +// BSON representations. The types in this package enable a flexible system for handling this +// encoding and decoding. +// +// The codec system is composed of two parts: +// +// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON +// representations. +// +// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for +// retrieving them. +// +// # ValueEncoders and ValueDecoders +// +// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. +// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the +// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc +// is provided to allow use of a function with the correct signature as a ValueEncoder. An +// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and +// to provide configuration information. +// +// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that +// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to +// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext +// instance is provided and serves similar functionality to the EncodeContext. +// +// # Registry and RegistryBuilder +// +// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a +// RegistryBuilder, which handles three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. +// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the +// interface, but not for values with concrete types that implement the interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. +// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values +// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will +// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete +// type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when +// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 +// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would +// change the behavior so these values decode as Go int instances instead: +// +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// +// 4. 
Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder +// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the +// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. +// These methods should be used to change the behavior for all values for a specific kind. +// +// # Registry Lookup Procedure +// +// When looking up an encoder in a Registry, the precedence rules are as follows: +// +// 1. A type encoder registered for the exact type of the value. +// +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the +// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first +// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined +// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those +// will take precedence over any new hooks. +// +// 3. A kind encoder registered for the value's kind. +// +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence +// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is +// found. +// +// # DefaultValueEncoders and DefaultValueDecoders +// +// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and +// ValueDecoders for handling a wide range of Go types, including all of the types within the +// primitive package. To make registering these codecs easier, a helper method on each type is +// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for +// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also +// handles registering type map entries for each BSON type. +package bsoncodec diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go new file mode 100644 index 0000000..eda417c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -0,0 +1,147 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// EmptyInterfaceCodec is the Codec used for interface{} values. +type EmptyInterfaceCodec struct { + DecodeBinaryAsSlice bool +} + +var ( + defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() + + _ ValueCodec = defaultEmptyInterfaceCodec + _ typeDecoder = defaultEmptyInterfaceCodec +) + +// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. +func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { + interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) 
+ + codec := EmptyInterfaceCodec{} + if interfaceOpt.DecodeBinaryAsSlice != nil { + codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice + } + return &codec +} + +// EncodeValue is the ValueEncoderFunc for interface{}. +func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { + isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } + } + + rtype, err := dc.LookupTypeMapEntry(valueType) + if err == nil { + return rtype, nil + } + + if isDocument { + // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, + // depending on the original valueType. + var lookupType bsontype.Type + switch valueType { + case bsontype.Type(0): + lookupType = bsontype.EmbeddedDocument + case bsontype.EmbeddedDocument: + lookupType = bsontype.Type(0) + } + + rtype, err = dc.LookupTypeMapEntry(lookupType) + if err == nil { + return rtype, nil + } + } + + return nil, err +} + +func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tEmpty { + return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} + } + + rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.Null: + return reflect.Zero(t), vr.ReadNull() + default: + return emptyValue, err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return emptyValue, err + } + + elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) + if err != nil { + return emptyValue, err + } + + if eic.DecodeBinaryAsSlice && rtype == tBinary { + binElem := elem.Interface().(primitive.Binary) + if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { + elem = reflect.ValueOf(binElem.Data) + } + } + + return elem, nil +} + +// DecodeValue is the ValueDecoderFunc for interface{}. 
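+//
+// Editorial sketch (assumed wiring, not from the vendored file): the codec is typically registered
+// for the interface{} type on a RegistryBuilder rb, e.g.
+//
+//	codec := NewEmptyInterfaceCodec() // DecodeBinaryAsSlice can be set through bsonoptions
+//	rb.RegisterCodec(tEmpty, codec)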
+func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + elem, err := eic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go new file mode 100644 index 0000000..e1fbef9 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -0,0 +1,309 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding" + "fmt" + "reflect" + "strconv" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultMapCodec = NewMapCodec() + +// MapCodec is the Codec used for map values. +type MapCodec struct { + DecodeZerosMap bool + EncodeNilAsEmpty bool + EncodeKeysWithStringer bool +} + +var _ ValueCodec = &MapCodec{} + +// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. +// This applies to types used as map keys and is similar to encoding.TextMarshaler. +type KeyMarshaler interface { + MarshalKey() (key string, err error) +} + +// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation +// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. +// +// UnmarshalKey must be able to decode the form generated by MarshalKey. +// UnmarshalKey must copy the text if it wishes to retain the text +// after returning. +type KeyUnmarshaler interface { + UnmarshalKey(key string) error +} + +// NewMapCodec returns a MapCodec with options opts. +func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { + mapOpt := bsonoptions.MergeMapCodecOptions(opts...) + + codec := MapCodec{} + if mapOpt.DecodeZerosMap != nil { + codec.DecodeZerosMap = *mapOpt.DecodeZerosMap + } + if mapOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty + } + if mapOpt.EncodeKeysWithStringer != nil { + codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer + } + return &codec +} + +// EncodeValue is the ValueEncoder for map[*]* types. +func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Map { + return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + if val.IsNil() && !mc.EncodeNilAsEmpty { + // If we have a nil map but we can't WriteNull, that means we're probably trying to encode + // to a TopLevel document. We can't currently tell if this is what actually happened, but if + // there's a deeper underlying problem, the error will also be returned from WriteDocument, + // so just continue. The operations on a map reflection value are valid, so we can call + // MapKeys within mapEncodeValue without a problem. 
+ err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return mc.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. +func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + keyStr, err := mc.encodeKey(key) + if err != nil { + return err + } + + if collisionFn != nil && collisionFn(keyStr) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(keyStr) + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// DecodeValue is the ValueDecoder for map[string/decimal]* types. +func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + if val.Len() > 0 && mc.DecodeZerosMap { + clearMap(val) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + k, err := mc.decodeKey(key, keyType) + if err != nil { + return err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return newDecodeError(key, err) + } + + val.SetMapIndex(k, elem) + } + return nil +} + +func clearMap(m reflect.Value) { + var none reflect.Value + for _, k := range m.MapKeys() { + m.SetMapIndex(k, none) + } +} + +func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { + if mc.EncodeKeysWithStringer { + return fmt.Sprint(val), nil + } + + // keys of any string type are used directly + if val.Kind() == reflect.String { + return val.String(), nil + } + // KeyMarshalers are marshaled + if km, ok := val.Interface().(KeyMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + buf, err := km.MarshalKey() + if err == nil { + return buf, nil + } 
+ return "", err + } + // keys implement encoding.TextMarshaler are marshaled. + if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } + + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil + } + return "", fmt.Errorf("unsupported key type: %v", val.Type()) +} + +var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { + keyVal := reflect.ValueOf(key) + var err error + switch { + // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler + case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(KeyUnmarshaler) + err = v.UnmarshalKey(key) + keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() + // Otherwise, go to type specific behavior + default: + switch keyType.Kind() { + case reflect.String: + keyVal = reflect.ValueOf(key).Convert(keyType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, parseErr := strconv.ParseInt(key, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { + err = fmt.Errorf("failed to unmarshal number key %v", key) + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, parseErr := strconv.ParseUint(key, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { + err = fmt.Errorf("failed to unmarshal number key %v", key) + break + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Float32, reflect.Float64: + if mc.EncodeKeysWithStringer { + parsed, err := strconv.ParseFloat(key, 64) + if err != nil { + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + } + keyVal = reflect.ValueOf(parsed) + break + } + fallthrough + default: + return keyVal, fmt.Errorf("unsupported key type: %v", keyType) + } + } + return keyVal, err +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go new file mode 100644 index 0000000..fbd9f0a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import "fmt" + +type mode int + +const ( + _ mode = iota + mTopLevel + mDocument + mArray + mValue + mElement + mCodeWithScope + mSpacer +) + +func (m mode) String() string { + var str string + + switch m { + case mTopLevel: + str = "TopLevel" + case mDocument: + str = "DocumentMode" + case mArray: + str = "ArrayMode" + case mValue: + str = "ValueMode" + case mElement: + str = "ElementMode" + case mCodeWithScope: + str = "CodeWithScopeMode" + case mSpacer: + str = "CodeWithScopeSpacerFrame" + default: + str = "UnknownMode" + } + + return str +} + +// TransitionError is an error returned when an invalid progressing a +// ValueReader or ValueWriter state machine occurs. +type TransitionError struct { + parent mode + current mode + destination mode +} + +func (te TransitionError) Error() string { + if te.destination == mode(0) { + return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) + } + if te.parent == mode(0) { + return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) + } + return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go new file mode 100644 index 0000000..616a3e7 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -0,0 +1,109 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var _ ValueEncoder = &PointerCodec{} +var _ ValueDecoder = &PointerCodec{} + +// PointerCodec is the Codec used for pointers. +type PointerCodec struct { + ecache map[reflect.Type]ValueEncoder + dcache map[reflect.Type]ValueDecoder + l sync.RWMutex +} + +// NewPointerCodec returns a PointerCodec that has been initialized. +func NewPointerCodec() *PointerCodec { + return &PointerCodec{ + ecache: make(map[reflect.Type]ValueEncoder), + dcache: make(map[reflect.Type]ValueDecoder), + } +} + +// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil +// or looking up an encoder for the type of value the pointer points to. +func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.Ptr { + if !val.IsValid() { + return vw.WriteNull() + } + return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + pc.l.RLock() + enc, ok := pc.ecache[val.Type()] + pc.l.RUnlock() + if ok { + if enc == nil { + return ErrNoEncoder{Type: val.Type()} + } + return enc.EncodeValue(ec, vw, val.Elem()) + } + + enc, err := ec.LookupEncoder(val.Type().Elem()) + pc.l.Lock() + pc.ecache[val.Type()] = enc + pc.l.Unlock() + if err != nil { + return err + } + + return enc.EncodeValue(ec, vw, val.Elem()) +} + +// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and +// using that to decode. 
If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+	if vr.Type() == bsontype.Undefined {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	pc.l.RLock()
+	dec, ok := pc.dcache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if dec == nil {
+			return ErrNoDecoder{Type: val.Type()}
+		}
+		return dec.DecodeValue(dc, vr, val.Elem())
+	}
+
+	dec, err := dc.LookupDecoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.dcache[val.Type()] = dec
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 0000000..4cf2b01
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 0000000..8064402
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,469 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+type ErrNoTypeMapEntry struct { + Type bsontype.Type +} + +func (entme ErrNoTypeMapEntry) Error() string { + return "no type map entry found for " + entme.Type.String() +} + +// ErrNotInterface is returned when the provided type is not an interface. +var ErrNotInterface = errors.New("The provided type is not an interface") + +// A RegistryBuilder is used to build a Registry. This type is not goroutine +// safe. +type RegistryBuilder struct { + typeEncoders map[reflect.Type]ValueEncoder + interfaceEncoders []interfaceValueEncoder + kindEncoders map[reflect.Kind]ValueEncoder + + typeDecoders map[reflect.Type]ValueDecoder + interfaceDecoders []interfaceValueDecoder + kindDecoders map[reflect.Kind]ValueDecoder + + typeMap map[bsontype.Type]reflect.Type +} + +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. +type Registry struct { + typeEncoders map[reflect.Type]ValueEncoder + typeDecoders map[reflect.Type]ValueDecoder + + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + + kindEncoders map[reflect.Kind]ValueEncoder + kindDecoders map[reflect.Kind]ValueDecoder + + typeMap map[bsontype.Type]reflect.Type + + mu sync.RWMutex +} + +// NewRegistryBuilder creates a new empty RegistryBuilder. +func NewRegistryBuilder() *RegistryBuilder { + return &RegistryBuilder{ + typeEncoders: make(map[reflect.Type]ValueEncoder), + typeDecoders: make(map[reflect.Type]ValueDecoder), + + interfaceEncoders: make([]interfaceValueEncoder, 0), + interfaceDecoders: make([]interfaceValueDecoder, 0), + + kindEncoders: make(map[reflect.Kind]ValueEncoder), + kindDecoders: make(map[reflect.Kind]ValueDecoder), + + typeMap: make(map[bsontype.Type]reflect.Type), + } +} + +func buildDefaultRegistry() *Registry { + rb := NewRegistryBuilder() + defaultValueEncoders.RegisterDefaultEncoders(rb) + defaultValueDecoders.RegisterDefaultDecoders(rb) + return rb.Build() +} + +// RegisterCodec will register the provided ValueCodec for the provided type. +func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { + rb.RegisterTypeEncoder(t, codec) + rb.RegisterTypeDecoder(t, codec) + return rb +} + +// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. +// +// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered +// for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It +// will not be called when marshalling a non-interface type that implements the interface. +func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + rb.typeEncoders[t] = enc + return rb +} + +// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when +// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. 
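+//
+// A hedged, editorial example (the interface and function names are illustrative, not upstream):
+// hook registration passes the interface's reflect.Type, commonly obtained via a nil-pointer
+// conversion:
+//
+//	tCustom := reflect.TypeOf((*CustomMarshaler)(nil)).Elem()
+//	rb.RegisterHookEncoder(tCustom, ValueEncoderFunc(encodeCustom))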
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + if t.Kind() != reflect.Interface { + panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", t, t.Kind()) + panic(panicStr) + } + + for idx, encoder := range rb.interfaceEncoders { + if encoder.i == t { + rb.interfaceEncoders[idx].ve = enc + return rb + } + } + + rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + return rb +} + +// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. +// +// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered +// for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. +// It will not be called when unmarshalling into a non-interface type that implements the interface. +func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + rb.typeDecoders[t] = dec + return rb +} + +// RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when +// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + if t.Kind() != reflect.Interface { + panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", t, t.Kind()) + panic(panicStr) + } + + for idx, decoder := range rb.interfaceDecoders { + if decoder.i == t { + rb.interfaceDecoders[idx].vd = dec + return rb + } + } + + rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + return rb +} + +// RegisterEncoder registers the provided type and encoder pair. +// +// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. +func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { + if t == tEmpty { + rb.typeEncoders[t] = enc + return rb + } + switch t.Kind() { + case reflect.Interface: + for idx, ir := range rb.interfaceEncoders { + if ir.i == t { + rb.interfaceEncoders[idx].ve = enc + return rb + } + } + + rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + default: + rb.typeEncoders[t] = enc + } + return rb +} + +// RegisterDecoder registers the provided type and decoder pair. +// +// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. +func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { + if t == nil { + rb.typeDecoders[nil] = dec + return rb + } + if t == tEmpty { + rb.typeDecoders[t] = dec + return rb + } + switch t.Kind() { + case reflect.Interface: + for idx, ir := range rb.interfaceDecoders { + if ir.i == t { + rb.interfaceDecoders[idx].vd = dec + return rb + } + } + + rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + default: + rb.typeDecoders[t] = dec + } + return rb +} + +// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// kind. 
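+//
+// For illustration (an editorial sketch mirroring the defaults registered earlier in this package):
+// kind-based registration installs one encoder for every value of a given reflect.Kind, e.g.
+//
+//	rb.RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec).
+//		RegisterDefaultEncoder(reflect.Map, defaultMapCodec)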
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { + rb.kindEncoders[kind] = enc + return rb +} + +// RegisterDefaultDecoder will register the provided ValueDecoder to the +// provided kind. +func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { + rb.kindDecoders[kind] = dec + return rb +} + +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. +// +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { + rb.typeMap[bt] = rt + return rb +} + +// Build creates a Registry from the current state of this RegistryBuilder. +func (rb *RegistryBuilder) Build() *Registry { + registry := new(Registry) + + registry.typeEncoders = make(map[reflect.Type]ValueEncoder) + for t, enc := range rb.typeEncoders { + registry.typeEncoders[t] = enc + } + + registry.typeDecoders = make(map[reflect.Type]ValueDecoder) + for t, dec := range rb.typeDecoders { + registry.typeDecoders[t] = dec + } + + registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) + copy(registry.interfaceEncoders, rb.interfaceEncoders) + + registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) + copy(registry.interfaceDecoders, rb.interfaceDecoders) + + registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) + for kind, enc := range rb.kindEncoders { + registry.kindEncoders[kind] = enc + } + + registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) + for kind, dec := range rb.kindDecoders { + registry.kindDecoders[kind] = dec + } + + registry.typeMap = make(map[bsontype.Type]reflect.Type) + for bt, rt := range rb.typeMap { + registry.typeMap[bt] = rt + } + + return registry +} + +// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: +// +// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using +// RegisterTypeEncoder for the interface will be selected. +// +// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the +// type. +// +// 3. An encoder registered for the reflect.Kind of the value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. 
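+//
+// Hedged usage sketch (editorial, not upstream documentation): given a Registry r produced by
+// Build, a lookup and its error handling might read
+//
+//	enc, err := r.LookupEncoder(reflect.TypeOf(int64(0)))
+//	if err != nil {
+//		// no type, hook, or kind encoder matched; err is an ErrNoEncoder
+//	}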
+func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { + encodererr := ErrNoEncoder{Type: t} + r.mu.RLock() + enc, found := r.lookupTypeEncoder(t) + r.mu.RUnlock() + if found { + if enc == nil { + return nil, ErrNoEncoder{Type: t} + } + return enc, nil + } + + enc, found = r.lookupInterfaceEncoder(t, true) + if found { + r.mu.Lock() + r.typeEncoders[t] = enc + r.mu.Unlock() + return enc, nil + } + + if t == nil { + r.mu.Lock() + r.typeEncoders[t] = nil + r.mu.Unlock() + return nil, encodererr + } + + enc, found = r.kindEncoders[t.Kind()] + if !found { + r.mu.Lock() + r.typeEncoders[t] = nil + r.mu.Unlock() + return nil, encodererr + } + + r.mu.Lock() + r.typeEncoders[t] = enc + r.mu.Unlock() + return enc, nil +} + +func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { + enc, found := r.typeEncoders[t] + return enc, found +} + +func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if t == nil { + return nil, false + } + for _, ienc := range r.interfaceEncoders { + if t.Implements(ienc.i) { + return ienc.ve, true + } + if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further ahead + // in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if !found { + defaultEnc = r.kindEncoders[t.Kind()] + } + return newCondAddrEncoder(ienc.ve, defaultEnc), true + } + } + return nil, false +} + +// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: +// +// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using +// RegisterTypeDecoder for the interface will be selected. +// +// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the +// type. +// +// 3. A decoder registered for the reflect.Kind of the value. +// +// If no decoder is found, an error of type ErrNoDecoder is returned. 
+func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { + if t == nil { + return nil, ErrNilType + } + decodererr := ErrNoDecoder{Type: t} + r.mu.RLock() + dec, found := r.lookupTypeDecoder(t) + r.mu.RUnlock() + if found { + if dec == nil { + return nil, ErrNoDecoder{Type: t} + } + return dec, nil + } + + dec, found = r.lookupInterfaceDecoder(t, true) + if found { + r.mu.Lock() + r.typeDecoders[t] = dec + r.mu.Unlock() + return dec, nil + } + + dec, found = r.kindDecoders[t.Kind()] + if !found { + r.mu.Lock() + r.typeDecoders[t] = nil + r.mu.Unlock() + return nil, decodererr + } + + r.mu.Lock() + r.typeDecoders[t] = dec + r.mu.Unlock() + return dec, nil +} + +func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { + dec, found := r.typeDecoders[t] + return dec, found +} + +func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { + for _, idec := range r.interfaceDecoders { + if t.Implements(idec.i) { + return idec.vd, true + } + if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further ahead + // in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(t, false) + if !found { + defaultDec = r.kindDecoders[t.Kind()] + } + return newCondAddrDecoder(idec.vd, defaultDec), true + } + } + return nil, false +} + +// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON +// type. If no type is found, ErrNoTypeMapEntry is returned. +func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { + t, ok := r.typeMap[bt] + if !ok || t == nil { + return nil, ErrNoTypeMapEntry{Type: bt} + } + return t, nil +} + +type interfaceValueEncoder struct { + i reflect.Type + ve ValueEncoder +} + +type interfaceValueDecoder struct { + i reflect.Type + vd ValueDecoder +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go new file mode 100644 index 0000000..3c1b6b8 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -0,0 +1,199 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var defaultSliceCodec = NewSliceCodec() + +// SliceCodec is the Codec used for slice values. +type SliceCodec struct { + EncodeNilAsEmpty bool +} + +var _ ValueCodec = &MapCodec{} + +// NewSliceCodec returns a MapCodec with options opts. +func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { + sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) + + codec := SliceCodec{} + if sliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for slice types. 
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() && !sc.EncodeNilAsEmpty { + return vw.WriteNull() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// DecodeValue is the ValueDecoder for slice types. 
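The EncodeValue special cases above can be seen from ordinary marshalling. A sketch assuming the driver's top-level bson.Marshal and bson.Raw helpers (the payload type is made up for the example): []byte is written as a BSON binary, other slices become arrays, and a nil slice encodes as null because EncodeNilAsEmpty defaults to false.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type payload struct {
	Blob  []byte   `bson:"blob"`
	Nums  []int32  `bson:"nums"`
	Names []string `bson:"names"` // deliberately left nil
}

func main() {
	raw, err := bson.Marshal(payload{Blob: []byte{0x01, 0x02}, Nums: []int32{1, 2, 3}})
	if err != nil {
		panic(err)
	}
	doc := bson.Raw(raw)

	fmt.Println(doc.Lookup("blob").Type)  // binary, not an array of ints
	fmt.Println(doc.Lookup("nums").Type)  // array
	fmt.Println(doc.Lookup("names").Type) // null, unless EncodeNilAsEmpty is enabled
}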
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) + } + + val.SetLen(0) + for _, elem := range data { + val.Set(reflect.Append(val, reflect.ValueOf(elem))) + } + return nil + case bsontype.String: + if sliceType := val.Type().Elem(); sliceType != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) + } + str, err := vr.ReadString() + if err != nil { + return err + } + byteStr := []byte(str) + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) + } + + val.SetLen(0) + for _, elem := range byteStr { + val.Set(reflect.Append(val, reflect.ValueOf(elem))) + } + return nil + default: + return fmt.Errorf("cannot decode %v into a slice", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = defaultValueDecoders.decodeD + default: + elemsFunc = defaultValueDecoders.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go new file mode 100644 index 0000000..5332b7c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -0,0 +1,119 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// StringCodec is the Codec used for struct values. +type StringCodec struct { + DecodeObjectIDAsHex bool +} + +var ( + defaultStringCodec = NewStringCodec() + + _ ValueCodec = defaultStringCodec + _ typeDecoder = defaultStringCodec +) + +// NewStringCodec returns a StringCodec with options opts. 
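DecodeObjectIDAsHex, declared on the codec above, defaults to true (see the string codec options later in this diff), so an ObjectID value can be decoded straight into a Go string field as its hex form; the relevant decode branch appears just below. A sketch assuming the driver's public bson and primitive packages:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	oid := primitive.NewObjectID()
	raw, err := bson.Marshal(bson.M{"id": oid})
	if err != nil {
		panic(err)
	}

	var out struct {
		ID string `bson:"id"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID == oid.Hex()) // true: the ObjectID arrives as its 24-character hex string
}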
+func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { + stringOpt := bsonoptions.MergeStringCodecOptions(opts...) + return &StringCodec{*stringOpt.DecodeObjectIDAsHex} +} + +// EncodeValue is the ValueEncoder for string types. +func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.String { + return emptyValue, ValueDecoderError{ + Name: "StringDecodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: reflect.Zero(t), + } + } + + var str string + var err error + switch vr.Type() { + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + case bsontype.ObjectID: + oid, err := vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + if sc.DecodeObjectIDAsHex { + str = oid.Hex() + } else { + byteArray := [12]byte(oid) + str = string(byteArray[:]) + } + case bsontype.Symbol: + str, err = vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} + } + str = string(data) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + + return reflect.ValueOf(str), nil +} + +// DecodeValue is the ValueDecoder for string types. +func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + elem, err := sc.decodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go new file mode 100644 index 0000000..da1ae18 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -0,0 +1,669 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. +type DecodeError struct { + keys []string + wrapped error +} + +// Unwrap returns the underlying error +func (de *DecodeError) Unwrap() error { + return de.wrapped +} + +// Error implements the error interface. 
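DecodeError, declared above, records the BSON key path of a failed decode; the Keys method just below returns that path in top-down order. A sketch of how a caller might recover it, assuming bson.Unmarshal returns the *DecodeError produced by the struct codec without extra wrapping (the inner and outer types are invented for the example):

package main

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

type inner struct {
	C int32 `bson:"c"`
}

type outer struct {
	B inner `bson:"b"`
}

func main() {
	// "c" carries a string, but the Go field wants an int32, so decoding fails.
	raw, err := bson.Marshal(bson.M{"b": bson.M{"c": "not a number"}})
	if err != nil {
		panic(err)
	}

	var dst outer
	err = bson.Unmarshal(raw, &dst)

	var de *bsoncodec.DecodeError
	if errors.As(err, &de) {
		fmt.Println(de.Keys()) // [b c]
	}
}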
+func (de *DecodeError) Error() string { + // The keys are stored in reverse order because the de.keys slice is builtup while propagating the error up the + // stack of BSON keys, so we call de.Keys(), which reverses them. + keyPath := strings.Join(de.Keys(), ".") + return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) +} + +// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down +// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be +// a string, the keys slice will be ["a", "b", "c"]. +func (de *DecodeError) Keys() []string { + reversedKeys := make([]string, 0, len(de.keys)) + for idx := len(de.keys) - 1; idx >= 0; idx-- { + reversedKeys = append(reversedKeys, de.keys[idx]) + } + + return reversedKeys +} + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// StructCodec is the Codec used for struct values. +type StructCodec struct { + cache map[reflect.Type]*structDescription + l sync.RWMutex + parser StructTagParser + DecodeZeroStruct bool + DecodeDeepZeroInline bool + EncodeOmitDefaultStruct bool + AllowUnexportedFields bool + OverwriteDuplicatedInlinedFields bool +} + +var _ ValueEncoder = &StructCodec{} +var _ ValueDecoder = &StructCodec{} + +// NewStructCodec returns a StructCodec that uses p for struct tag parsing. +func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { + if p == nil { + return nil, errors.New("a StructTagParser must be provided to NewStructCodec") + } + + structOpt := bsonoptions.MergeStructCodecOptions(opts...) + + codec := &StructCodec{ + cache: make(map[reflect.Type]*structDescription), + parser: p, + } + + if structOpt.DecodeZeroStruct != nil { + codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct + } + if structOpt.DecodeDeepZeroInline != nil { + codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline + } + if structOpt.EncodeOmitDefaultStruct != nil { + codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct + } + if structOpt.OverwriteDuplicatedInlinedFields != nil { + codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields + } + if structOpt.AllowUnexportedFields != nil { + codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields + } + + return codec, nil +} + +// EncodeValue handles encoding generic struct types. 
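The Zeroer interface above is what lets omitempty respect a custom notion of empty. A sketch with the default registry; the Window type is hypothetical and exists only for the example:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Window reports itself as zero whenever it spans nothing, regardless of the raw field values.
type Window struct {
	From, To int64
}

func (w Window) IsZero() bool { return w.From >= w.To }

type query struct {
	Name   string `bson:"name"`
	Window Window `bson:"window,omitempty"`
}

func main() {
	raw, err := bson.Marshal(query{Name: "x", Window: Window{From: 5, To: 5}})
	if err != nil {
		panic(err)
	}
	_, lookupErr := bson.Raw(raw).LookupErr("window")
	fmt.Println(lookupErr != nil) // true: the field was omitted because IsZero reported true
}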
+func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv, err = fieldByIndexErr(val, desc.inline) + if err != nil { + continue + } + } + + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + + if err != nil && err != errInvalidValue { + return err + } + + if err == errInvalidValue { + if desc.omitEmpty { + continue + } + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + err = vw2.WriteNull() + if err != nil { + return err + } + continue + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + var isZero bool + rvInterface := rv.Interface() + if cz, ok := encoder.(CodecZeroer); ok { + isZero = cz.IsTypeZero(rvInterface) + } else if rv.Kind() == reflect.Interface { + // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. + isZero = rv.IsNil() + } else { + isZero = sc.isZero(rvInterface) + } + if desc.omitEmpty && isZero { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +func newDecodeError(key string, original error) error { + de, ok := original.(*DecodeError) + if !ok { + return &DecodeError{ + keys: []string{key}, + wrapped: original, + } + } + + de.keys = append(de.keys, key) + return de +} + +// DecodeValue implements the Codec interface. +// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
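The decode semantics in the comment above are easy to miss: a map that already holds entries keeps them and is only extended. A sketch assuming bson.Marshal and bson.Unmarshal from the vendored driver (the settings type is invented):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type settings struct {
	Flags map[string]bool `bson:"flags"`
}

func main() {
	raw, err := bson.Marshal(settings{Flags: map[string]bool{"beta": true}})
	if err != nil {
		panic(err)
	}

	// Decode into a value whose map is already populated: the existing key survives
	// and the decoded key is merged in, because the map is not zeroed first.
	dst := settings{Flags: map[string]bool{"legacy": true}}
	if err := bson.Unmarshal(raw, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Flags["legacy"], dst.Flags["beta"]) // true true
}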
+func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + if sc.DecodeZeroStruct { + val.Set(reflect.Zero(val.Type())) + } + if sc.DecodeDeepZeroInline && sd.inline { + val.Set(deepZero(val.Type())) + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + // if the original name isn't found in the struct description, try again with the name in lowercase + // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field + // names + fd, exists = sd.fm[strings.ToLower(name)] + } + + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. + err = vr.Skip() + if err != nil { + return err + } + continue + } + + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + + elem := reflect.New(inlineMap.Type().Elem()).Elem() + r.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(r, vr, elem) + if err != nil { + return err + } + inlineMap.SetMapIndex(reflect.ValueOf(name), elem) + continue + } + + var field reflect.Value + if fd.inline == nil { + field = val.Field(fd.idx) + } else { + field, err = getInlineField(val, fd.inline) + if err != nil { + return err + } + } + + if !field.CanSet() { // Being settable is a super set of being addressable. 
+ innerErr := fmt.Errorf("field %v is not settable", field) + return newDecodeError(fd.name, innerErr) + } + if field.Kind() == reflect.Ptr && field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Addr() + + dctx := DecodeContext{ + Registry: r.Registry, + Truncate: fd.truncate || r.Truncate, + defaultDocumentType: r.defaultDocumentType, + } + + if fd.decoder == nil { + return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) + } + + err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) + if err != nil { + return newDecodeError(fd.name, err) + } + } + + return nil +} + +func (sc *StructCodec) isZero(i interface{}) bool { + v := reflect.ValueOf(i) + + // check the value validity + if !v.IsValid() { + return true + } + + if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { + return z.IsZero() + } + + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Struct: + if sc.EncodeOmitDefaultStruct { + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + for i := 0; i < v.NumField(); i++ { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + continue // Private field + } + fld := v.Field(i) + if !sc.isZero(fld.Interface()) { + return false + } + } + return true + } + } + + return false +} + +type structDescription struct { + fm map[string]fieldDescription + fl []fieldDescription + inlineMap int + inline bool +} + +type fieldDescription struct { + name string // BSON key name + fieldName string // struct field name + idx int + omitEmpty bool + minSize bool + truncate bool + inline []int + encoder ValueEncoder + decoder ValueDecoder +} + +type byIndex []fieldDescription + +func (bi byIndex) Len() int { return len(bi) } + +func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } + +func (bi byIndex) Less(i, j int) bool { + // If a field is inlined, its index in the top level struct is stored at inline[0] + iIdx, jIdx := bi[i].idx, bi[j].idx + if len(bi[i].inline) > 0 { + iIdx = bi[i].inline[0] + } + if len(bi[j].inline) > 0 { + jIdx = bi[j].inline[0] + } + if iIdx != jIdx { + return iIdx < jIdx + } + for k, biik := range bi[i].inline { + if k >= len(bi[j].inline) { + return false + } + if biik != bi[j].inline[k] { + return biik < bi[j].inline[k] + } + } + return len(bi[i].inline) < len(bi[j].inline) +} + +func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { + // We need to analyze the struct, including getting the tags, collecting + // information about inlining, and create a map of the field name to the field. 
+ sc.l.RLock() + ds, exists := sc.cache[t] + sc.l.RUnlock() + if exists { + return ds, nil + } + + numFields := t.NumField() + sd := &structDescription{ + fm: make(map[string]fieldDescription, numFields), + fl: make([]fieldDescription, 0, numFields), + inlineMap: -1, + } + + var fields []fieldDescription + for i := 0; i < numFields; i++ { + sf := t.Field(i) + if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { + // field is private or unexported fields aren't allowed, ignore + continue + } + + sfType := sf.Type + encoder, err := r.LookupEncoder(sfType) + if err != nil { + encoder = nil + } + decoder, err := r.LookupDecoder(sfType) + if err != nil { + decoder = nil + } + + description := fieldDescription{ + fieldName: sf.Name, + idx: i, + encoder: encoder, + decoder: decoder, + } + + stags, err := sc.parser.ParseStructTags(sf) + if err != nil { + return nil, err + } + if stags.Skip { + continue + } + description.name = stags.Name + description.omitEmpty = stags.OmitEmpty + description.minSize = stags.MinSize + description.truncate = stags.Truncate + + if stags.Inline { + sd.inline = true + switch sfType.Kind() { + case reflect.Map: + if sd.inlineMap >= 0 { + return nil, errors.New("(struct " + t.String() + ") multiple inline maps") + } + if sfType.Key() != tString { + return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") + } + sd.inlineMap = description.idx + case reflect.Ptr: + sfType = sfType.Elem() + if sfType.Kind() != reflect.Struct { + return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) + } + fallthrough + case reflect.Struct: + inlinesf, err := sc.describeStruct(r, sfType) + if err != nil { + return nil, err + } + for _, fd := range inlinesf.fl { + if fd.inline == nil { + fd.inline = []int{i, fd.idx} + } else { + fd.inline = append([]int{i}, fd.inline...) + } + fields = append(fields, fd) + + } + default: + return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) + } + continue + } + fields = append(fields, description) + } + + // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name + sort.Slice(fields, func(i, j int) bool { + x := fields + // sort field by name, breaking ties with depth, then + // breaking ties with index sequence. + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].inline) != len(x[j].inline) { + return len(x[i].inline) < len(x[j].inline) + } + return byIndex(x).Less(i, j) + }) + + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + sd.fl = append(sd.fl, fi) + sd.fm[name] = fi + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if !ok || !sc.OverwriteDuplicatedInlinedFields { + return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) + } + sd.fl = append(sd.fl, dominant) + sd.fm[name] = dominant + } + + sort.Sort(byIndex(sd.fl)) + + sc.l.Lock() + sc.cache[t] = sd + sc.l.Unlock() + + return sd, nil +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's inlining rules. 
If there are multiple top-level +// fields, the boolean will be false: This condition is an error in Go +// and we skip all the fields. +func dominantField(fields []fieldDescription) (fieldDescription, bool) { + // The fields are sorted in increasing index-length order, then by presence of tag. + // That means that the first field is the dominant one. We need only check + // for error cases: two fields at top level. + if len(fields) > 1 && + len(fields[0].inline) == len(fields[1].inline) { + return fieldDescription{}, false + } + return fields[0], true +} + +func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { + defer func() { + if recovered := recover(); recovered != nil { + switch r := recovered.(type) { + case string: + err = fmt.Errorf("%s", r) + case error: + err = r + } + } + }() + + result = v.FieldByIndex(index) + return +} + +func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { + field, err := fieldByIndexErr(val, index) + if err == nil { + return field, nil + } + + // if parent of this element doesn't exist, fix its parent + inlineParent := index[:len(index)-1] + var fParent reflect.Value + if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { + fParent, err = getInlineField(val, inlineParent) + if err != nil { + return fParent, err + } + } + fParent.Set(reflect.New(fParent.Type().Elem())) + + return fieldByIndexErr(val, index) +} + +// DeepZero returns recursive zero object +func deepZero(st reflect.Type) (result reflect.Value) { + result = reflect.Indirect(reflect.New(st)) + + if result.Kind() == reflect.Struct { + for i := 0; i < result.NumField(); i++ { + if f := result.Field(i); f.Kind() == reflect.Ptr { + if f.CanInterface() { + if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) + } + } + } + } + } + + return +} + +// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside +func recursivePointerTo(v reflect.Value) reflect.Value { + v = reflect.Indirect(v) + result := reflect.New(v.Type()) + if v.Kind() == reflect.Struct { + for i := 0; i < v.NumField(); i++ { + if f := v.Field(i); f.Kind() == reflect.Ptr { + if f.Elem().Kind() == reflect.Struct { + result.Elem().Field(i).Set(recursivePointerTo(f)) + } + } + } + } + + return result +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go new file mode 100644 index 0000000..62708c5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -0,0 +1,139 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "strings" +) + +// StructTagParser returns the struct tags for a given struct field. +type StructTagParser interface { + ParseStructTags(reflect.StructField) (StructTags, error) +} + +// StructTagParserFunc is an adapter that allows a generic function to be used +// as a StructTagParser. +type StructTagParserFunc func(reflect.StructField) (StructTags, error) + +// ParseStructTags implements the StructTagParser interface. 
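dominantField above resolves the case where an inlined struct and the outer struct map to the same BSON key, and because OverwriteDuplicatedInlinedFields defaults to true (see the struct codec options later in this diff) the outer field wins instead of raising a duplicate-key error. A sketch with invented base and widget types, assuming the driver's public bson helpers:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type base struct {
	Name string `bson:"name"`
	Kind string `bson:"kind"`
}

type widget struct {
	Base base   `bson:",inline"`
	Name string `bson:"name"` // collides with the inlined "name" key
}

func main() {
	raw, err := bson.Marshal(widget{Base: base{Name: "inner", Kind: "demo"}, Name: "outer"})
	if err != nil {
		panic(err)
	}
	doc := bson.Raw(raw)
	fmt.Println(doc.Lookup("name").StringValue()) // "outer": the top-level field dominates the inlined one
	fmt.Println(doc.Lookup("kind").StringValue()) // "demo": non-colliding inlined keys are hoisted as usual
}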
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { + return stpf(sf) +} + +// StructTags represents the struct tag fields that the StructCodec uses during +// the encoding and decoding process. +// +// In the case of a struct, the lowercased field name is used as the key for each exported +// field but this behavior may be changed using a struct tag. The tag may also contain flags to +// adjust the marshalling behavior for the field. +// +// The properties are defined below: +// +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. +// +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. +// +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. +// +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. +// +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. +// +// TODO(skriptble): Add tags for undefined as nil and for null as nil. +type StructTags struct { + Name string + OmitEmpty bool + MinSize bool + Truncate bool + Inline bool + Skip bool +} + +// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. +// It will handle the bson struct tag. See the documentation for StructTags to see +// what each of the returned fields means. +// +// If there is no name in the struct tag fields, the struct field name is lowercased. +// The tag formats accepted are: +// +// "[][,[,]]" +// +// `(...) bson:"[][,[,]]" (...)` +// +// An example: +// +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } +// +// A struct tag either consisting entirely of '-' or with a bson key with a +// value consisting entirely of '-' will return a StructTags with Skip true and +// the remaining fields will be their default values. +var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + return parseTags(key, tag) +} + +func parseTags(key string, tag string) (StructTags, error) { + var st StructTags + if tag == "-" { + st.Skip = true + return st, nil + } + + for idx, str := range strings.Split(tag, ",") { + if idx == 0 && str != "" { + key = str + } + switch str { + case "omitempty": + st.OmitEmpty = true + case "minsize": + st.MinSize = true + case "truncate": + st.Truncate = true + case "inline": + st.Inline = true + } + } + + st.Name = key + + return st, nil +} + +// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser +// but will also fallback to parsing the json tag instead on a field where the +// bson tag isn't available. 
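One way to put JSONFallbackStructTagParser to work is to build a StructCodec with it and register that codec for the struct kind, so existing json tags double as BSON keys. A sketch assuming bson.NewRegistryBuilder and bson.MarshalWithRegistry from the vendored driver (apiUser is an invented type):

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

type apiUser struct {
	DisplayName string `json:"display_name"`
}

func main() {
	sc, err := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser)
	if err != nil {
		panic(err)
	}

	rb := bson.NewRegistryBuilder()
	rb.RegisterDefaultEncoder(reflect.Struct, sc)
	rb.RegisterDefaultDecoder(reflect.Struct, sc)
	reg := rb.Build()

	raw, err := bson.MarshalWithRegistry(reg, apiUser{DisplayName: "Ada"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw).Lookup("display_name").StringValue()) // "Ada": the json tag supplied the key
}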
+var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok { + tag, ok = sf.Tag.Lookup("json") + } + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + + return parseTags(key, tag) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go new file mode 100644 index 0000000..ec7e30f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -0,0 +1,127 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +const ( + timeFormatString = "2006-01-02T15:04:05.999Z07:00" +) + +// TimeCodec is the Codec used for time.Time values. +type TimeCodec struct { + UseLocalTimeZone bool +} + +var ( + defaultTimeCodec = NewTimeCodec() + + _ ValueCodec = defaultTimeCodec + _ typeDecoder = defaultTimeCodec +) + +// NewTimeCodec returns a TimeCodec with options opts. +func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { + timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) + + codec := TimeCodec{} + if timeOpt.UseLocalTimeZone != nil { + codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone + } + return &codec +} + +func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tTime { + return emptyValue, ValueDecoderError{ + Name: "TimeDecodeValue", + Types: []reflect.Type{tTime}, + Received: reflect.Zero(t), + } + } + + var timeVal time.Time + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err := vr.ReadDateTime() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(dt/1000, dt%1000*1000000) + case bsontype.String: + // assume strings are in the isoTimeFormat + timeStr, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + timeVal, err = time.Parse(timeFormatString, timeStr) + if err != nil { + return emptyValue, err + } + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(i64/1000, i64%1000*1000000) + case bsontype.Timestamp: + t, _, err := vr.ReadTimestamp() + if err != nil { + return emptyValue, err + } + timeVal = time.Unix(int64(t), 0) + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) + } + + if !tc.UseLocalTimeZone { + timeVal = timeVal.UTC() + } + return reflect.ValueOf(timeVal), nil +} + +// DecodeValue is the ValueDecoderFunc for time.Time. 
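As the decode branches above suggest, BSON datetimes carry millisecond precision and come back in UTC unless UseLocalTimeZone is enabled on the codec. A short round-trip sketch using the driver's public bson package (the event type is invented):

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"
)

type event struct {
	At time.Time `bson:"at"`
}

func main() {
	in := event{At: time.Date(2024, 1, 2, 3, 4, 5, 123456789, time.Local)}

	raw, err := bson.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out event
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}

	fmt.Println(out.At.Location())                              // UTC
	fmt.Println(out.At.Equal(in.At.Truncate(time.Millisecond))) // true: sub-millisecond precision is dropped
}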
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + elem, err := tc.decodeType(dc, vr, tTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// EncodeValue is the ValueEncoderFunc for time.TIme. +func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTime { + return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} + } + tt := val.Interface().(time.Time) + dt := primitive.NewDateTimeFromTime(tt) + return vw.WriteDateTime(int64(dt)) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go new file mode 100644 index 0000000..07f4b70 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -0,0 +1,57 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var tBool = reflect.TypeOf(false) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) +var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go new file mode 100644 index 0000000..0b21ce9 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ 
-0,0 +1,173 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "math" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// UIntCodec is the Codec used for uint values. +type UIntCodec struct { + EncodeToMinSize bool +} + +var ( + defaultUIntCodec = NewUIntCodec() + + _ ValueCodec = defaultUIntCodec + _ typeDecoder = defaultUIntCodec +) + +// NewUIntCodec returns a UIntCodec with options opts. +func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { + uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) + + codec := UIntCodec{} + if uintOpt.EncodeToMinSize != nil { + codec.EncodeToMinSize = *uintOpt.EncodeToMinSize + } + return &codec +} + +// EncodeValue is the ValueEncoder for uint types. +func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + + // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 + useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) + + if u64 <= math.MaxInt32 && useMinSize { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return emptyValue, fmt.Errorf("%d overflows uint8", i64) + } + + return reflect.ValueOf(uint8(i64)), nil + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return emptyValue, fmt.Errorf("%d overflows uint16", i64) + } + + return reflect.ValueOf(uint16(i64)), nil + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return emptyValue, fmt.Errorf("%d overflows uint32", i64) + } + 
+ return reflect.ValueOf(uint32(i64)), nil + case reflect.Uint64: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint64", i64) + } + + return reflect.ValueOf(uint64(i64)), nil + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + + return reflect.ValueOf(uint(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.Zero(t), + } + } +} + +// DecodeValue is the ValueDecoder for uint types. +func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + elem, err := uic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetUint(elem.Uint()) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go new file mode 100644 index 0000000..b1256a4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +type ByteSliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +} + +// ByteSliceCodec creates a new *ByteSliceCodecOptions +func ByteSliceCodec() *ByteSliceCodecOptions { + return &ByteSliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { + bs.EncodeNilAsEmpty = &b + return bs +} + +// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { + bs := ByteSliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return bs +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 0000000..c40973c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. 
+package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go new file mode 100644 index 0000000..6caaa00 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +type EmptyInterfaceCodecOptions struct { + DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +} + +// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { + return &EmptyInterfaceCodecOptions{} +} + +// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { + e.DecodeBinaryAsSlice = &b + return e +} + +// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { + e := EmptyInterfaceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeBinaryAsSlice != nil { + e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice + } + } + + return e +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go new file mode 100644 index 0000000..7a6a880 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -0,0 +1,67 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// MapCodecOptions represents all possible options for map encoding and decoding. +type MapCodecOptions struct { + DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. + EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. + // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must + // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a + // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the + // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override + // TextMarshaler/TextUnmarshaler. Defaults to false. 
+ EncodeKeysWithStringer *bool +} + +// MapCodec creates a new *MapCodecOptions +func MapCodec() *MapCodecOptions { + return &MapCodecOptions{} +} + +// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { + t.DecodeZerosMap = &b + return t +} + +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { + t.EncodeNilAsEmpty = &b + return t +} + +// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the +// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key +// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with +// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer +// will override TextMarshaler/TextUnmarshaler. Defaults to false. +func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { + t.EncodeKeysWithStringer = &b + return t +} + +// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { + s := MapCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeZerosMap != nil { + s.DecodeZerosMap = opt.DecodeZerosMap + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + if opt.EncodeKeysWithStringer != nil { + s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go new file mode 100644 index 0000000..ef965e4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// SliceCodecOptions represents all possible options for slice encoding and decoding. +type SliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +} + +// SliceCodec creates a new *SliceCodecOptions +func SliceCodec() *SliceCodecOptions { + return &SliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { + s.EncodeNilAsEmpty = &b + return s +} + +// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. 
+func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { + s := SliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go new file mode 100644 index 0000000..65964f4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -0,0 +1,41 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +var defaultDecodeOIDAsHex = true + +// StringCodecOptions represents all possible options for string encoding and decoding. +type StringCodecOptions struct { + DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. +} + +// StringCodec creates a new *StringCodecOptions +func StringCodec() *StringCodecOptions { + return &StringCodecOptions{} +} + +// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made +// from the raw object ID bytes will be used. Defaults to true. +func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { + t.DecodeObjectIDAsHex = &b + return t +} + +// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. +func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { + s := &StringCodecOptions{&defaultDecodeOIDAsHex} + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeObjectIDAsHex != nil { + s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go new file mode 100644 index 0000000..78d1dd8 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -0,0 +1,87 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +var defaultOverwriteDuplicatedInlinedFields = true + +// StructCodecOptions represents all possible options for struct encoding and decoding. +type StructCodecOptions struct { + DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. + DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. + EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. + AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. + OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. 
+} + +// StructCodec creates a new *StructCodecOptions +func StructCodec() *StructCodecOptions { + return &StructCodecOptions{} +} + +// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { + t.DecodeZeroStruct = &b + return t +} + +// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. +func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { + t.DecodeDeepZeroInline = &b + return t +} + +// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all +// its values set to their default value. Defaults to false. +func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { + t.EncodeOmitDefaultStruct = &b + return t +} + +// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the +// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when +// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if +// there are duplicate keys after the struct is inlined. Defaults to true. +func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { + t.OverwriteDuplicatedInlinedFields = &b + return t +} + +// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. +func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { + t.AllowUnexportedFields = &b + return t +} + +// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { + s := &StructCodecOptions{ + OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, + } + for _, opt := range opts { + if opt == nil { + continue + } + + if opt.DecodeZeroStruct != nil { + s.DecodeZeroStruct = opt.DecodeZeroStruct + } + if opt.DecodeDeepZeroInline != nil { + s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline + } + if opt.EncodeOmitDefaultStruct != nil { + s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct + } + if opt.OverwriteDuplicatedInlinedFields != nil { + s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields + } + if opt.AllowUnexportedFields != nil { + s.AllowUnexportedFields = opt.AllowUnexportedFields + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go new file mode 100644 index 0000000..13496d1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// TimeCodecOptions represents all possible options for time.Time encoding and decoding. +type TimeCodecOptions struct { + UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. 
+} + +// TimeCodec creates a new *TimeCodecOptions +func TimeCodec() *TimeCodecOptions { + return &TimeCodecOptions{} +} + +// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { + t.UseLocalTimeZone = &b + return t +} + +// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { + t := TimeCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.UseLocalTimeZone != nil { + t.UseLocalTimeZone = opt.UseLocalTimeZone + } + } + + return t +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go new file mode 100644 index 0000000..e08b7f1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// UIntCodecOptions represents all possible options for uint encoding and decoding. +type UIntCodecOptions struct { + EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +} + +// UIntCodec creates a new *UIntCodecOptions +func UIntCodec() *UIntCodecOptions { + return &UIntCodecOptions{} +} + +// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { + u.EncodeToMinSize = &b + return u +} + +// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { + u := UIntCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeToMinSize != nil { + u.EncodeToMinSize = opt.EncodeToMinSize + } + } + + return u +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go new file mode 100644 index 0000000..5cdf646 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -0,0 +1,445 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "fmt" + "io" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// Copier is a type that allows copying between ValueReaders, ValueWriters, and +// []byte values. +type Copier struct{} + +// NewCopier creates a new copier with the given registry. If a nil registry is provided +// a default registry is used. +func NewCopier() Copier { + return Copier{} +} + +// CopyDocument handles copying a document from src to dst. 
+func CopyDocument(dst ValueWriter, src ValueReader) error { + return Copier{}.CopyDocument(dst, src) +} + +// CopyDocument handles copying one document from the src to the dst. +func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { + dr, err := src.ReadDocument() + if err != nil { + return err + } + + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + return c.copyDocumentCore(dw, dr) +} + +// CopyArrayFromBytes copies the values from a BSON array represented as a +// []byte to a ValueWriter. +func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { + aw, err := dst.WriteArray() + if err != nil { + return err + } + + err = c.CopyBytesToArrayWriter(aw, src) + if err != nil { + return err + } + + return aw.WriteArrayEnd() +} + +// CopyDocumentFromBytes copies the values from a BSON document represented as a +// []byte to a ValueWriter. +func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { + dw, err := dst.WriteDocument() + if err != nil { + return err + } + + err = c.CopyBytesToDocumentWriter(dw, src) + if err != nil { + return err + } + + return dw.WriteDocumentEnd() +} + +type writeElementFn func(key string) (ValueWriter, error) + +// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an +// ArrayWriter. +func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { + wef := func(_ string) (ValueWriter, error) { + return dst.WriteArrayElement() + } + + return c.copyBytesToValueWriter(src, wef) +} + +// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a +// DocumentWriter. +func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { + wef := func(key string) (ValueWriter, error) { + return dst.WriteDocumentElement(key) + } + + return c.copyBytesToValueWriter(src, wef) +} + +func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { + // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + length, rem, ok := bsoncore.ReadLength(src) + if !ok { + return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) + } + if len(src) < int(length) { + return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) + } + rem = rem[:length-4] + + var t bsontype.Type + var key string + var val bsoncore.Value + for { + t, rem, ok = bsoncore.ReadType(rem) + if !ok { + return io.EOF + } + if t == bsontype.Type(0) { + if len(rem) != 0 { + return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) + } + break + } + + key, rem, ok = bsoncore.ReadKey(rem) + if !ok { + return fmt.Errorf("invalid key found. remaining bytes=%v", rem) + } + + // write as either array element or document element using writeElementFn + vw, err := wef(key) + if err != nil { + return err + } + + val, rem, ok = bsoncore.ReadValue(rem, t) + if !ok { + return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) + } + err = c.CopyValueFromBytes(vw, t, val.Data) + if err != nil { + return err + } + } + return nil +} + +// CopyDocumentToBytes copies an entire document from the ValueReader and +// returns it as bytes. +func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { + return c.AppendDocumentBytes(nil, src) +} + +// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will +// append the result to dst. 
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer vwPool.Put(vw) + + vw.reset(dst) + + err := c.CopyDocument(vw, src) + dst = vw.buf + return dst, err +} + +// AppendArrayBytes copies an array from the ValueReader to dst. +func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer vwPool.Put(vw) + + vw.reset(dst) + + err := c.copyArray(vw, src) + dst = vw.buf + return dst, err +} + +// CopyValueFromBytes will write the value represtend by t and src to dst. +func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { + if wvb, ok := dst.(BytesWriter); ok { + return wvb.WriteValueBytes(t, src) + } + + vr := vrPool.Get().(*valueReader) + defer vrPool.Put(vr) + + vr.reset(src) + vr.pushElement(t) + + return c.CopyValue(dst, vr) +} + +// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a +// []byte. +func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { + return c.AppendValueBytes(nil, src) +} + +// AppendValueBytes functions the same as CopyValueToBytes, but will append the +// result to dst. +func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { + if br, ok := src.(BytesReader); ok { + return br.ReadValueBytes(dst) + } + + vw := vwPool.Get().(*valueWriter) + defer vwPool.Put(vw) + + start := len(dst) + + vw.reset(dst) + vw.push(mElement) + + err := c.CopyValue(vw, src) + if err != nil { + return 0, dst, err + } + + return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil +} + +// CopyValue will copy a single value from src to dst. 
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { + var err error + switch src.Type() { + case bsontype.Double: + var f64 float64 + f64, err = src.ReadDouble() + if err != nil { + break + } + err = dst.WriteDouble(f64) + case bsontype.String: + var str string + str, err = src.ReadString() + if err != nil { + return err + } + err = dst.WriteString(str) + case bsontype.EmbeddedDocument: + err = c.CopyDocument(dst, src) + case bsontype.Array: + err = c.copyArray(dst, src) + case bsontype.Binary: + var data []byte + var subtype byte + data, subtype, err = src.ReadBinary() + if err != nil { + break + } + err = dst.WriteBinaryWithSubtype(data, subtype) + case bsontype.Undefined: + err = src.ReadUndefined() + if err != nil { + break + } + err = dst.WriteUndefined() + case bsontype.ObjectID: + var oid primitive.ObjectID + oid, err = src.ReadObjectID() + if err != nil { + break + } + err = dst.WriteObjectID(oid) + case bsontype.Boolean: + var b bool + b, err = src.ReadBoolean() + if err != nil { + break + } + err = dst.WriteBoolean(b) + case bsontype.DateTime: + var dt int64 + dt, err = src.ReadDateTime() + if err != nil { + break + } + err = dst.WriteDateTime(dt) + case bsontype.Null: + err = src.ReadNull() + if err != nil { + break + } + err = dst.WriteNull() + case bsontype.Regex: + var pattern, options string + pattern, options, err = src.ReadRegex() + if err != nil { + break + } + err = dst.WriteRegex(pattern, options) + case bsontype.DBPointer: + var ns string + var pointer primitive.ObjectID + ns, pointer, err = src.ReadDBPointer() + if err != nil { + break + } + err = dst.WriteDBPointer(ns, pointer) + case bsontype.JavaScript: + var js string + js, err = src.ReadJavascript() + if err != nil { + break + } + err = dst.WriteJavascript(js) + case bsontype.Symbol: + var symbol string + symbol, err = src.ReadSymbol() + if err != nil { + break + } + err = dst.WriteSymbol(symbol) + case bsontype.CodeWithScope: + var code string + var srcScope DocumentReader + code, srcScope, err = src.ReadCodeWithScope() + if err != nil { + break + } + + var dstScope DocumentWriter + dstScope, err = dst.WriteCodeWithScope(code) + if err != nil { + break + } + err = c.copyDocumentCore(dstScope, srcScope) + case bsontype.Int32: + var i32 int32 + i32, err = src.ReadInt32() + if err != nil { + break + } + err = dst.WriteInt32(i32) + case bsontype.Timestamp: + var t, i uint32 + t, i, err = src.ReadTimestamp() + if err != nil { + break + } + err = dst.WriteTimestamp(t, i) + case bsontype.Int64: + var i64 int64 + i64, err = src.ReadInt64() + if err != nil { + break + } + err = dst.WriteInt64(i64) + case bsontype.Decimal128: + var d128 primitive.Decimal128 + d128, err = src.ReadDecimal128() + if err != nil { + break + } + err = dst.WriteDecimal128(d128) + case bsontype.MinKey: + err = src.ReadMinKey() + if err != nil { + break + } + err = dst.WriteMinKey() + case bsontype.MaxKey: + err = src.ReadMaxKey() + if err != nil { + break + } + err = dst.WriteMaxKey() + default: + err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) + } + + return err +} + +func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { + ar, err := src.ReadArray() + if err != nil { + return err + } + + aw, err := dst.WriteArray() + if err != nil { + return err + } + + for { + vr, err := ar.ReadValue() + if err == ErrEOA { + break + } + if err != nil { + return err + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } + } + + 
return aw.WriteArrayEnd() +} + +func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { + for { + key, vr, err := dr.ReadElement() + if err == ErrEOD { + break + } + if err != nil { + return err + } + + vw, err := dw.WriteDocumentElement(key) + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go new file mode 100644 index 0000000..750b0d2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonrw contains abstractions for reading and writing +// BSON and BSON like types from sources. +package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go new file mode 100644 index 0000000..54c76bf --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -0,0 +1,806 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +const maxNestingDepth = 200 + +// ErrInvalidJSON indicates the JSON input is invalid +var ErrInvalidJSON = errors.New("invalid JSON input") + +type jsonParseState byte + +const ( + jpsStartState jsonParseState = iota + jpsSawBeginObject + jpsSawEndObject + jpsSawBeginArray + jpsSawEndArray + jpsSawColon + jpsSawComma + jpsSawKey + jpsSawValue + jpsDoneState + jpsInvalidState +) + +type jsonParseMode byte + +const ( + jpmInvalidMode jsonParseMode = iota + jpmObjectMode + jpmArrayMode +) + +type extJSONValue struct { + t bsontype.Type + v interface{} +} + +type extJSONObject struct { + keys []string + values []*extJSONValue +} + +type extJSONParser struct { + js *jsonScanner + s jsonParseState + m []jsonParseMode + k string + v *extJSONValue + + err error + canonical bool + depth int + maxDepth int + + emptyObject bool + relaxedUUID bool +} + +// newExtJSONParser returns a new extended JSON parser, ready to to begin +// parsing from the first character of the argued json input. It will not +// perform any read-ahead and will therefore not report any errors about +// malformed JSON at this point. 
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { + return &extJSONParser{ + js: &jsonScanner{r: r}, + s: jpsStartState, + m: []jsonParseMode{}, + canonical: canonical, + maxDepth: maxNestingDepth, + } +} + +// peekType examines the next value and returns its BSON Type +func (ejp *extJSONParser) peekType() (bsontype.Type, error) { + var t bsontype.Type + var err error + initialState := ejp.s + + ejp.advanceState() + switch ejp.s { + case jpsSawValue: + t = ejp.v.t + case jpsSawBeginArray: + t = bsontype.Array + case jpsInvalidState: + err = ejp.err + case jpsSawComma: + // in array mode, seeing a comma means we need to progress again to actually observe a type + if ejp.peekMode() == jpmArrayMode { + return ejp.peekType() + } + case jpsSawEndArray: + // this would only be a valid state if we were in array mode, so return end-of-array error + err = ErrEOA + case jpsSawBeginObject: + // peek key to determine type + ejp.advanceState() + switch ejp.s { + case jpsSawEndObject: // empty embedded document + t = bsontype.EmbeddedDocument + ejp.emptyObject = true + case jpsInvalidState: + err = ejp.err + case jpsSawKey: + if initialState == jpsStartState { + return bsontype.EmbeddedDocument, nil + } + t = wrapperKeyBSONType(ejp.k) + + // if $uuid is encountered, parse as binary subtype 4 + if ejp.k == "$uuid" { + ejp.relaxedUUID = true + t = bsontype.Binary + } + + switch t { + case bsontype.JavaScript: + // just saw $code, need to check for $scope at same level + _, err = ejp.readValue(bsontype.JavaScript) + if err != nil { + break + } + + switch ejp.s { + case jpsSawEndObject: // type is TypeJavaScript + case jpsSawComma: + ejp.advanceState() + + if ejp.s == jpsSawKey && ejp.k == "$scope" { + t = bsontype.CodeWithScope + } else { + err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) + } + case jpsInvalidState: + err = ejp.err + default: + err = ErrInvalidJSON + } + case bsontype.CodeWithScope: + err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") + } + } + } + + return t, err +} + +// readKey parses the next key and its type and returns them +func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { + if ejp.emptyObject { + ejp.emptyObject = false + return "", 0, ErrEOD + } + + // advance to key (or return with error) + switch ejp.s { + case jpsStartState: + ejp.advanceState() + if ejp.s == jpsSawBeginObject { + ejp.advanceState() + } + case jpsSawBeginObject: + ejp.advanceState() + case jpsSawValue, jpsSawEndObject, jpsSawEndArray: + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject, jpsSawComma: + ejp.advanceState() + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsDoneState: + return "", 0, io.EOF + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, ErrInvalidJSON + } + case jpsSawKey: // do nothing (key was peeked before) + default: + return "", 0, invalidRequestError("key") + } + + // read key + var key string + + switch ejp.s { + case jpsSawKey: + key = ejp.k + case jpsSawEndObject: + return "", 0, ErrEOD + case jpsInvalidState: + return "", 0, ejp.err + default: + return "", 0, invalidRequestError("key") + } + + // check for colon + ejp.advanceState() + if err := ensureColon(ejp.s, key); err != nil { + return "", 0, err + } + + // peek at the value to determine type + t, err := ejp.peekType() + if err != nil { + return "", 0, err + } + + return key, t, nil +} + +// readValue returns the value corresponding to the Type returned by peekType 
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { + if ejp.s == jpsInvalidState { + return nil, ejp.err + } + + var v *extJSONValue + + switch t { + case bsontype.Null, bsontype.Boolean, bsontype.String: + if ejp.s != jpsSawValue { + return nil, invalidRequestError(t.String()) + } + v = ejp.v + case bsontype.Int32, bsontype.Int64, bsontype.Double: + // relaxed version allows these to be literal number values + if ejp.s == jpsSawValue { + v = ejp.v + break + } + fallthrough + case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { + return nil, invalidJSONErrorForType("value", t) + } + + v = ejp.v + + // read end object + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("} after value", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: + if ejp.s != jpsSawKey { + return nil, invalidRequestError(t.String()) + } + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + if t == bsontype.Binary && ejp.s == jpsSawValue { + // convert relaxed $uuid format + if ejp.relaxedUUID { + defer func() { ejp.relaxedUUID = false }() + uuid, err := ejp.v.parseSymbol() + if err != nil { + return nil, err + } + + // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing + // in the 8th, 13th, 18th, and 23rd characters. 
+ // + // See https://tools.ietf.org/html/rfc4122#section-3 + valid := len(uuid) == 36 && + string(uuid[8]) == "-" && + string(uuid[13]) == "-" && + string(uuid[18]) == "-" && + string(uuid[23]) == "-" + if !valid { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // remove hyphens + uuidNoHyphens := strings.Replace(uuid, "-", "", -1) + if len(uuidNoHyphens) != 32 { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") + } + + // convert hex to bytes + bytes, err := hex.DecodeString(uuidNoHyphens) + if err != nil { + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) + } + + base64 := &extJSONValue{ + t: bsontype.String, + v: base64.StdEncoding.EncodeToString(bytes), + } + subType := &extJSONValue{ + t: bsontype.String, + v: "04", + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + + break + } + + // convert legacy $binary format + base64 := ejp.v + + ejp.advanceState() + if ejp.s != jpsSawComma { + return nil, invalidJSONErrorForType(",", bsontype.Binary) + } + + ejp.advanceState() + key, t, err := ejp.readKey() + if err != nil { + return nil, err + } + if key != "$type" { + return nil, invalidJSONErrorForType("$type", bsontype.Binary) + } + + subType, err := ejp.readValue(t) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) + } + + v = &extJSONValue{ + t: bsontype.EmbeddedDocument, + v: &extJSONObject{ + keys: []string{"base64", "subType"}, + values: []*extJSONValue{base64, subType}, + }, + } + break + } + + // read KV pairs + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONErrorForType("{", t) + } + + keys, vals, err := ejp.readObject(2, true) + if err != nil { + return nil, err + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) + } + + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + + case bsontype.DateTime: + switch ejp.s { + case jpsSawValue: + v = ejp.v + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + ejp.advanceState() + switch ejp.s { + case jpsSawBeginObject: + keys, vals, err := ejp.readObject(1, true) + if err != nil { + return nil, err + } + v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} + case jpsSawValue: + if ejp.canonical { + return nil, invalidJSONError("{") + } + v = ejp.v + default: + if ejp.canonical { + return nil, invalidJSONErrorForType("object", t) + } + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, invalidJSONErrorForType("value and then }", t) + } + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.JavaScript: + switch ejp.s { + case jpsSawKey: + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read value + ejp.advanceState() + if ejp.s != 
jpsSawValue { + return nil, invalidJSONErrorForType("value", t) + } + v = ejp.v + + // read end object or comma and just return + ejp.advanceState() + case jpsSawEndObject: + v = ejp.v + default: + return nil, invalidRequestError(t.String()) + } + case bsontype.CodeWithScope: + if ejp.s == jpsSawKey && ejp.k == "$scope" { + v = ejp.v // this is the $code string from earlier + + // read colon + ejp.advanceState() + if err := ensureColon(ejp.s, ejp.k); err != nil { + return nil, err + } + + // read { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, invalidJSONError("$scope to be embedded document") + } + } else { + return nil, invalidRequestError(t.String()) + } + case bsontype.EmbeddedDocument, bsontype.Array: + return nil, invalidRequestError(t.String()) + } + + return v, nil +} + +// readObject is a utility method for reading full objects of known (or expected) size +// it is useful for extended JSON types such as binary, datetime, regex, and timestamp +func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { + keys := make([]string, numKeys) + vals := make([]*extJSONValue, numKeys) + + if !started { + ejp.advanceState() + if ejp.s != jpsSawBeginObject { + return nil, nil, invalidJSONError("{") + } + } + + for i := 0; i < numKeys; i++ { + key, t, err := ejp.readKey() + if err != nil { + return nil, nil, err + } + + switch ejp.s { + case jpsSawKey: + v, err := ejp.readValue(t) + if err != nil { + return nil, nil, err + } + + keys[i] = key + vals[i] = v + case jpsSawValue: + keys[i] = key + vals[i] = ejp.v + default: + return nil, nil, invalidJSONError("value") + } + } + + ejp.advanceState() + if ejp.s != jpsSawEndObject { + return nil, nil, invalidJSONError("}") + } + + return keys, vals, nil +} + +// advanceState reads the next JSON token from the scanner and transitions +// from the current state based on that token's type +func (ejp *extJSONParser) advanceState() { + if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { + return + } + + jt, err := ejp.js.nextToken() + + if err != nil { + ejp.err = err + ejp.s = jpsInvalidState + return + } + + valid := ejp.validateToken(jt.t) + if !valid { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + return + } + + switch jt.t { + case jttBeginObject: + ejp.s = jpsSawBeginObject + ejp.pushMode(jpmObjectMode) + ejp.depth++ + + if ejp.depth > ejp.maxDepth { + ejp.err = nestingDepthError(jt.p, ejp.depth) + ejp.s = jpsInvalidState + } + case jttEndObject: + ejp.s = jpsSawEndObject + ejp.depth-- + + if ejp.popMode() != jpmObjectMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttBeginArray: + ejp.s = jpsSawBeginArray + ejp.pushMode(jpmArrayMode) + case jttEndArray: + ejp.s = jpsSawEndArray + + if ejp.popMode() != jpmArrayMode { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttColon: + ejp.s = jpsSawColon + case jttComma: + ejp.s = jpsSawComma + case jttEOF: + ejp.s = jpsDoneState + if len(ejp.m) != 0 { + ejp.err = unexpectedTokenError(jt) + ejp.s = jpsInvalidState + } + case jttString: + switch ejp.s { + case jpsSawComma: + if ejp.peekMode() == jpmArrayMode { + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + return + } + fallthrough + case jpsSawBeginObject: + ejp.s = jpsSawKey + ejp.k = jt.v.(string) + return + } + fallthrough + default: + ejp.s = jpsSawValue + ejp.v = extendJSONToken(jt) + } +} + +var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ + jpsStartState: { + jttBeginObject: 
true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + jttEOF: true, + }, + jpsSawBeginObject: { + jttEndObject: true, + jttString: true, + }, + jpsSawEndObject: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawBeginArray: { + jttBeginObject: true, + jttBeginArray: true, + jttEndArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawEndArray: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsSawColon: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawComma: { + jttBeginObject: true, + jttBeginArray: true, + jttInt32: true, + jttInt64: true, + jttDouble: true, + jttString: true, + jttBool: true, + jttNull: true, + }, + jpsSawKey: { + jttColon: true, + }, + jpsSawValue: { + jttEndObject: true, + jttEndArray: true, + jttComma: true, + jttEOF: true, + }, + jpsDoneState: {}, + jpsInvalidState: {}, +} + +func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { + switch ejp.s { + case jpsSawEndObject: + // if we are at depth zero and the next token is a '{', + // we can consider it valid only if we are not in array mode. + if jtt == jttBeginObject && ejp.depth == 0 { + return ejp.peekMode() != jpmArrayMode + } + case jpsSawComma: + switch ejp.peekMode() { + // the only valid next token after a comma inside a document is a string (a key) + case jpmObjectMode: + return jtt == jttString + case jpmInvalidMode: + return false + } + } + + _, ok := jpsValidTransitionTokens[ejp.s][jtt] + return ok +} + +// ensureExtValueType returns true if the current value has the expected +// value type for single-key extended JSON types. 
For example, +// {"$numberInt": v} v must be TypeString +func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { + switch t { + case bsontype.MinKey, bsontype.MaxKey: + return ejp.v.t == bsontype.Int32 + case bsontype.Undefined: + return ejp.v.t == bsontype.Boolean + case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: + return ejp.v.t == bsontype.String + default: + return false + } +} + +func (ejp *extJSONParser) pushMode(m jsonParseMode) { + ejp.m = append(ejp.m, m) +} + +func (ejp *extJSONParser) popMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + m := ejp.m[l-1] + ejp.m = ejp.m[:l-1] + + return m +} + +func (ejp *extJSONParser) peekMode() jsonParseMode { + l := len(ejp.m) + if l == 0 { + return jpmInvalidMode + } + + return ejp.m[l-1] +} + +func extendJSONToken(jt *jsonToken) *extJSONValue { + var t bsontype.Type + + switch jt.t { + case jttInt32: + t = bsontype.Int32 + case jttInt64: + t = bsontype.Int64 + case jttDouble: + t = bsontype.Double + case jttString: + t = bsontype.String + case jttBool: + t = bsontype.Boolean + case jttNull: + t = bsontype.Null + default: + return nil + } + + return &extJSONValue{t: t, v: jt.v} +} + +func ensureColon(s jsonParseState, key string) error { + if s != jpsSawColon { + return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) + } + + return nil +} + +func invalidRequestError(s string) error { + return fmt.Errorf("invalid request to read %s", s) +} + +func invalidJSONError(expected string) error { + return fmt.Errorf("invalid JSON input; expected %s", expected) +} + +func invalidJSONErrorForType(expected string, t bsontype.Type) error { + return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) +} + +func unexpectedTokenError(jt *jsonToken) error { + switch jt.t { + case jttInt32, jttInt64, jttDouble: + return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) + case jttString: + return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) + case jttBool: + return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) + case jttNull: + return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) + case jttEOF: + return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) + default: + return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) + } +} + +func nestingDepthError(p, depth int) error { + return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go new file mode 100644 index 0000000..35832d7 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -0,0 +1,644 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonrw + +import ( + "fmt" + "io" + "sync" + + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
+type ExtJSONValueReaderPool struct { + pool sync.Pool +} + +// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { + return &ExtJSONValueReaderPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(extJSONValueReader) + }, + }, + } +} + +// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { + vr := bvrp.pool.Get().(*extJSONValueReader) + return vr.reset(r, canonical) +} + +// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing +// is inserted into the pool and ok will be false. +func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { + bvr, ok := vr.(*extJSONValueReader) + if !ok { + return false + } + + bvr, _ = bvr.reset(nil, false) + bvrp.pool.Put(bvr) + return true +} + +type ejvrState struct { + mode mode + vType bsontype.Type + depth int +} + +// extJSONValueReader is for reading extended JSON. +type extJSONValueReader struct { + p *extJSONParser + + stack []ejvrState + frame int +} + +// NewExtJSONValueReader creates a new ValueReader from a given io.Reader +// It will interpret the JSON of r as canonical or relaxed according to the +// given canonical flag +func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { + return newExtJSONValueReader(r, canonical) +} + +func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { + ejvr := new(extJSONValueReader) + return ejvr.reset(r, canonical) +} + +func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { + p := newExtJSONParser(r, canonical) + typ, err := p.peekType() + + if err != nil { + return nil, ErrInvalidJSON + } + + var m mode + switch typ { + case bsontype.EmbeddedDocument: + m = mTopLevel + case bsontype.Array: + m = mArray + default: + m = mValue + } + + stack := make([]ejvrState, 1, 5) + stack[0] = ejvrState{ + mode: m, + vType: typ, + } + return &extJSONValueReader{ + p: p, + stack: stack, + }, nil +} + +func (ejvr *extJSONValueReader) advanceFrame() { + if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack + length := len(ejvr.stack) + if length+1 >= cap(ejvr.stack) { + // double it + buf := make([]ejvrState, 2*cap(ejvr.stack)+1) + copy(buf, ejvr.stack) + ejvr.stack = buf + } + ejvr.stack = ejvr.stack[:length+1] + } + ejvr.frame++ + + // Clean the stack + ejvr.stack[ejvr.frame].mode = 0 + ejvr.stack[ejvr.frame].vType = 0 + ejvr.stack[ejvr.frame].depth = 0 +} + +func (ejvr *extJSONValueReader) pushDocument() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mDocument + ejvr.stack[ejvr.frame].depth = ejvr.p.depth +} + +func (ejvr *extJSONValueReader) pushCodeWithScope() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mCodeWithScope +} + +func (ejvr *extJSONValueReader) pushArray() { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = mArray +} + +func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { + ejvr.advanceFrame() + + ejvr.stack[ejvr.frame].mode = m + ejvr.stack[ejvr.frame].vType = t +} + +func (ejvr *extJSONValueReader) pop() { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + ejvr.frame-- + case mDocument, mArray, mCodeWithScope: + ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... 
+ } +} + +func (ejvr *extJSONValueReader) skipObject() { + // read entire object until depth returns to 0 (last ending } or ] seen) + depth := 1 + for depth > 0 { + ejvr.p.advanceState() + + // If object is empty, raise depth and continue. When emptyObject is true, the + // parser has already read both the opening and closing brackets of an empty + // object ("{}"), so the next valid token will be part of the parent document, + // not part of the nested document. + // + // If there is a comma, there are remaining fields, emptyObject must be set back + // to false, and comma must be skipped with advanceState(). + if ejvr.p.emptyObject { + if ejvr.p.s == jpsSawComma { + ejvr.p.emptyObject = false + ejvr.p.advanceState() + } + depth-- + continue + } + + switch ejvr.p.s { + case jpsSawBeginObject, jpsSawBeginArray: + depth++ + case jpsSawEndObject, jpsSawEndArray: + depth-- + } + } +} + +func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { + te := TransitionError{ + name: name, + current: ejvr.stack[ejvr.frame].mode, + destination: destination, + modes: modes, + action: "read", + } + if ejvr.frame != 0 { + te.parent = ejvr.stack[ejvr.frame-1].mode + } + return te +} + +func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { + return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) +} + +func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != t { + return ejvr.typeError(t) + } + default: + modes := []mode{mElement, mValue} + if addModes != nil { + modes = append(modes, addModes...) 
+ } + return ejvr.invalidTransitionErr(destination, callerName, modes) + } + + return nil +} + +func (ejvr *extJSONValueReader) Type() bsontype.Type { + return ejvr.stack[ejvr.frame].vType +} + +func (ejvr *extJSONValueReader) Skip() error { + switch ejvr.stack[ejvr.frame].mode { + case mElement, mValue: + default: + return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) + } + + defer ejvr.pop() + + t := ejvr.stack[ejvr.frame].vType + switch t { + case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: + // read entire array, doc or CodeWithScope + ejvr.skipObject() + default: + _, err := ejvr.p.readValue(t) + if err != nil { + return err + } + } + + return nil +} + +func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: // allow reading array from top level + case mArray: + return ejvr, nil + default: + if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { + return nil, err + } + } + + ejvr.pushArray() + + return ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { + if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { + return nil, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Binary) + if err != nil { + return nil, 0, err + } + + b, btype, err = v.parseBinary() + + ejvr.pop() + return b, btype, err +} + +func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { + if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { + return false, err + } + + v, err := ejvr.p.readValue(bsontype.Boolean) + if err != nil { + return false, err + } + + if v.t != bsontype.Boolean { + return false, fmt.Errorf("expected type bool, but got type %s", v.t) + } + + ejvr.pop() + return v.v.(bool), nil +} + +func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: + return ejvr, nil + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { + return nil, ejvr.typeError(bsontype.EmbeddedDocument) + } + + ejvr.pushDocument() + return ejvr, nil + default: + return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + } +} + +func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { + if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { + return "", nil, err + } + + v, err := ejvr.p.readValue(bsontype.CodeWithScope) + if err != nil { + return "", nil, err + } + + code, err = v.parseJavascript() + + ejvr.pushCodeWithScope() + return code, ejvr, err +} + +func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { + return "", primitive.NilObjectID, err + } + + v, err := ejvr.p.readValue(bsontype.DBPointer) + if err != nil { + return "", primitive.NilObjectID, err + } + + ns, oid, err = v.parseDBPointer() + + ejvr.pop() + return ns, oid, err +} + +func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.DateTime) + if err != nil { + return 0, err + } + + d, err := v.parseDateTime() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) 
ReadDecimal128() (primitive.Decimal128, error) { + if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { + return primitive.Decimal128{}, err + } + + v, err := ejvr.p.readValue(bsontype.Decimal128) + if err != nil { + return primitive.Decimal128{}, err + } + + d, err := v.parseDecimal128() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { + if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Double) + if err != nil { + return 0, err + } + + d, err := v.parseDouble() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { + if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int32) + if err != nil { + return 0, err + } + + i, err := v.parseInt32() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int64) + if err != nil { + return 0, err + } + + i, err := v.parseInt64() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { + if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.JavaScript) + if err != nil { + return "", err + } + + code, err = v.parseJavascript() + + ejvr.pop() + return code, err +} + +func (ejvr *extJSONValueReader) ReadMaxKey() error { + if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MaxKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("max") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadMinKey() error { + if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MinKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("min") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadNull() error { + if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Null) + if err != nil { + return err + } + + if v.t != bsontype.Null { + return fmt.Errorf("expected type null but got type %s", v.t) + } + + ejvr.pop() + return nil +} + +func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { + if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { + return primitive.ObjectID{}, err + } + + v, err := ejvr.p.readValue(bsontype.ObjectID) + if err != nil { + return primitive.ObjectID{}, err + } + + oid, err := v.parseObjectID() + + ejvr.pop() + return oid, err +} + +func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { + if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { + return "", "", err + } + + v, err := ejvr.p.readValue(bsontype.Regex) + if err != nil { + return "", "", err + } + + pattern, options, err = v.parseRegex() + + ejvr.pop() + return pattern, options, err +} + +func (ejvr *extJSONValueReader) ReadString() (string, error) { + if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { + 
return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) + if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if err == ErrEOD { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + ejvr.pop() + } + + return "", nil, err + } + + ejvr.push(mElement, t) + return name, ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mArray: + default: + return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) + } + + t, err := ejvr.p.peekType() + if err != nil { + if err == ErrEOA { + ejvr.pop() + } + + return nil, err + } + + ejvr.push(mValue, t) + return ejvr, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go new file mode 100644 index 0000000..ba39c96 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go @@ -0,0 +1,223 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/golang/go by The Go Authors +// See THIRD-PARTY-NOTICES for original license terms. + +package bsonrw + +import "unicode/utf8" + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). 
+var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML