From 65437791e119fd22d5bf349ed75b533fa7345ce9 Mon Sep 17 00:00:00 2001 From: poliha Date: Fri, 3 May 2019 14:14:41 +0100 Subject: [PATCH] Horizonclient fix merge issues (#1241) * horizonclient/txnbuild README fixes (#1210) * fix client links in top-level readme * update clients README * mark old client deprecated in godoc short summary * fix code of conduct, standardise example * fix code of conduct/contributing links * txnbuild: enables multiple signatures (#1198) This PR enables multiple signatures on a transaction in the new Go SDK. It also lets every `Operation` type have a different source account than its `Transaction`. These changes are intertwined. Without multiple signatures, every operation in a transaction must share the transaction's source account. Differing source accounts are the most common use case for multiple signatures, and they also test it with greatest completeness. * root repo changelog links to sub-projects (#1214) * keystore: add update-keys endpoints to spec (#1215) We need an endpoint to update the encrypted seed when users forget their passwords. 
* move keystore to exp to fix build (#1223) * Add minimal files to fix build (#1225) * exp/ticker: Orderbook data support (#1193) * exp/ticker: update partial market query to also output base/counter asset types * exp/ticker: add functions to scrape orderbook data * exp/ticker: update orderbookstats param names to match market standards * exp/ticker: create orderbookstats database model and migrations * exp/ticker: consolidate upsert logic and implement orderbookstats upsert * exp/ticker: create CLI command to ingest orderbooks * exp/ticker: update horizon SDK path * exp/ticker: add orderbook stats to market.json generator * exp/ticker: add separate query to retrieve relevant markets * exp/ticker: add orderbook stats to graphql interface * exp/ticker: add Docker support for orderbook ingestion * exp/ticker: fix partial aggregated market query + add tests * exp/ticker: fix globall aggregated market query + add tests * exp/ticker: update ticker binary link and fix crontab comment * exp/ticker: fix issue that would enable inf values to be stored in db * exp/ticker: ensure only store valid orderbook entries are stored * exp/ticker: format gql/static/bindata.go to prevent CI errors * exp/ticker: add updated bindata with pg 9.5-compatible migrations * exp/ticker: ensure markets with 0 orderbook entries can be handled by tickerdb * exp/ticker: remove unnecessary commas from graphql schema * exp/ticker: consolidate bid and ask positions on field names * exp/ticker: simplify logic for creating orderbook requests * exp/ticker: fix how bid and ask volumes are calculated * exp/ticker: allow negative spread values * exp/ticker: fix close_time fallback on markets query * exp/ticker: add page size limit to orderbook requests * Make PR template a bit more helpful. (#1238) * Make PR template a bit more helpful. Clean up some typos, and point to an example docs folder in the go repo rather than just the external docs hosted on our site. * Fix typo. * Agh, formatting. 
* horizonclient: update Fund method (#1213) * update client.Fund * update change log * fix space * horizonclient: add more documentation (#1226) * Remove old stream method * more comments * fix format * split comment * horizonclient: Set default HTTP client (#1228) * set default HTTP client * changes from review * changelog for txnbuild 1.1 (#1231) * horizonclient/txnbuild README fixes (#1210) * fix client links in top-level readme * update clients README * mark old client deprecated in godoc short summary * fix code of conduct, standardise example * fix code of conduct/contributing links * txnbuild: enables multiple signatures (#1198) This PR enables multiple signatures on a transaction in the new Go SDK. It also lets every `Operation` type have a different source account than its `Transaction`. These changes are intertwined. Without multiple signatures, every operation in a transaction must share the transaction's source account. Differing source accounts are the most common use case for multiple signatures, and they also test it with greatest completeness. * root repo changelog links to sub-projects (#1214) * keystore: add update-keys endpoints to spec (#1215) We need an endpoint to update the encrypted seed when users forget their passwords. 
* move keystore to exp to fix build (#1223) * Add minimal files to fix build (#1225) * changelog for txnbuild 1.1 * update change log (#1233) * horizonclient: add version (#1229) * add package version * fix go fmt * clients/horizonclient: support dynamic effects (#1217) * update effects in protocols/horizon * update client interface * update tests * update changelogs * fix typo --- .github/pull_request_template.md | 24 ++- exp/ticker/cmd/generate.go | 2 +- exp/ticker/cmd/ingest.go | 24 +++ exp/ticker/docker/conf/crontab.txt | 3 + exp/ticker/docker/setup | 2 +- exp/ticker/docker/start | 14 ++ exp/ticker/internal/actions_market.go | 10 + exp/ticker/internal/actions_orderbook.go | 65 ++++++ exp/ticker/internal/gql/main.go | 15 ++ exp/ticker/internal/gql/resolvers_market.go | 14 ++ exp/ticker/internal/gql/static/bindata.go | 63 +++--- exp/ticker/internal/gql/static/schema.gql | 13 ++ exp/ticker/internal/main.go | 8 + exp/ticker/internal/scraper/main.go | 24 +++ .../internal/scraper/orderbook_scraper.go | 119 +++++++++++ exp/ticker/internal/tickerdb/helpers.go | 20 ++ exp/ticker/internal/tickerdb/main.go | 31 +++ .../20190425110313-add_orderbook_stats.sql | 26 +++ ...26092321-add_aggregated_orderbook_view.sql | 20 ++ .../internal/tickerdb/migrations/bindata.go | 89 +++++++- exp/ticker/internal/tickerdb/queries_asset.go | 26 +-- .../internal/tickerdb/queries_market.go | 106 ++++++++-- .../internal/tickerdb/queries_market_test.go | 156 ++++++++++++++ .../internal/tickerdb/queries_orderbook.go | 7 + .../tickerdb/queries_orderbook_test.go | 195 ++++++++++++++++++ exp/ticker/internal/utils/main.go | 10 + 26 files changed, 997 insertions(+), 89 deletions(-) create mode 100644 exp/ticker/internal/actions_orderbook.go create mode 100644 exp/ticker/internal/scraper/orderbook_scraper.go create mode 100644 exp/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql create mode 100644 
exp/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql create mode 100644 exp/ticker/internal/tickerdb/queries_orderbook.go create mode 100644 exp/ticker/internal/tickerdb/queries_orderbook_test.go diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 9f748c43a6..f8c67d5838 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,22 +1,28 @@ -If you're making a doc PR or something tiny where the below is irrelevant, just delete this template and use a short description. +If you're making a doc PR or something tiny where the below is irrelevant, just delete this +template and use a short description. ### PR Structure -* [ ] This PR has reasonably narrow scope (if not, break it down into smaller PRs) -* [ ] This PR avoids mixing refactoring changes with feature changes (split into two PRs otherwise) -* [ ] This PR's title starts with name of package that is most changed in the PR, ex. `services/friendbot` - +* [ ] This PR has reasonably narrow scope (if not, break it down into smaller PRs). +* [ ] This PR avoids mixing refactoring changes with feature changes (split into two PRs + otherwise). +* [ ] This PR's title starts with name of package that is most changed in the PR, ex. + `services/friendbot` ### Thoroughness * [ ] This PR adds tests for the most critical parts of the new functionality or fixes. -* [ ] I've updated any docs ([developer docs](https://www.stellar.org/developers/reference/), `.md` files, etc...) affected by this change +* [ ] I've updated any docs ([developer docs](https://www.stellar.org/developers/reference/), `.md` + files, etc... affected by this change). Take a look in the `docs` folder for a given service, + like [this one](https://github.com/stellar/go/tree/master/services/horizon/internal/docs). 
### Release planning -* [ ] I've updated the relevant CHANGELOG ([here](services/horizon/CHANGELOG.md) for Horizon) if needed with deprecations, added features, breaking changes, and DB schema changes -* [ ] I've decided if this PR requires a new major/minor version according to [semver](https://semver.org/), or if it's monly a patch change. The PR is targeted at the next release branch if it's not a patch change. - +* [ ] I've updated the relevant CHANGELOG ([here](services/horizon/CHANGELOG.md) for Horizon) if + needed with deprecations, added features, breaking changes, and DB schema changes. +* [ ] I've decided if this PR requires a new major/minor version according to + [semver](https://semver.org/), or if it's mainly a patch change. The PR is targeted at the next + release branch if it's not a patch change. ## Summary diff --git a/exp/ticker/cmd/generate.go b/exp/ticker/cmd/generate.go index b58c3c24d3..d5c8757c54 100644 --- a/exp/ticker/cmd/generate.go +++ b/exp/ticker/cmd/generate.go @@ -76,7 +76,7 @@ var cmdGenerateAssetData = &cobra.Command{ Logger.Infof("Starting asset data generation, outputting to: %s\n", AssetsOutFile) err = ticker.GenerateAssetsFile(&session, Logger, AssetsOutFile) if err != nil { - Logger.Fatal("could not generate market data:", err) + Logger.Fatal("could not generate asset data:", err) } }, } diff --git a/exp/ticker/cmd/ingest.go b/exp/ticker/cmd/ingest.go index 8f9190d112..618678108d 100644 --- a/exp/ticker/cmd/ingest.go +++ b/exp/ticker/cmd/ingest.go @@ -16,6 +16,7 @@ func init() { rootCmd.AddCommand(cmdIngest) cmdIngest.AddCommand(cmdIngestAssets) cmdIngest.AddCommand(cmdIngestTrades) + cmdIngest.AddCommand(cmdIngestOrderbooks) cmdIngestTrades.Flags().BoolVar( &ShouldStream, @@ -96,3 +97,26 @@ var cmdIngestTrades = &cobra.Command{ } }, } + +var cmdIngestOrderbooks = &cobra.Command{ + Use: "orderbooks", + Short: "Refreshes the orderbook stats database with new data retrieved from Horizon.", + Run: func(cmd *cobra.Command, args 
[]string) { + Logger.Info("Refreshing the asset database") + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", err) + } + defer session.DB.Close() + + err = ticker.RefreshOrderbookEntries(&session, Client, Logger) + if err != nil { + Logger.Fatal("could not refresh error database:", err) + } + }, +} diff --git a/exp/ticker/docker/conf/crontab.txt b/exp/ticker/docker/conf/crontab.txt index be442ec159..8a1481f158 100644 --- a/exp/ticker/docker/conf/crontab.txt +++ b/exp/ticker/docker/conf/crontab.txt @@ -4,6 +4,9 @@ # Refresh the database of assets, hourly: @hourly /opt/stellar/bin/ticker ingest assets --db-url=postgres://127.0.0.1:5432/ticker > /home/stellar/last-ingest-assets.log 2>&1 +# Refresh the database of orderbooks, every 10 minutes: +*/10 * * * * /opt/stellar/bin/ticker ingest orderbooks --db-url=postgres://127.0.0.1:5432/ticker > /home/stellar/last-ingest-orderbooks.log 2>&1 + # Backfill the database of trades (including possible new assets), every 6 hours: 0 */6 * * * /opt/stellar/bin/ticker ingest trades --db-url=postgres://127.0.0.1:5432/ticker > /home/stellar/last-ingest-trades.log 2>&1 diff --git a/exp/ticker/docker/setup b/exp/ticker/docker/setup index 576816f052..acef2e9fab 100644 --- a/exp/ticker/docker/setup +++ b/exp/ticker/docker/setup @@ -10,7 +10,7 @@ mkdir -p /opt/stellar/www chown -R stellar:stellar /opt/stellar/www mkdir -p /opt/stellar/postgresql/data -wget -O ticker.tar.gz https://github.com/accordeiro/ticker-releases/releases/download/v0.4-alpha/ticker.tar.gz +wget -O ticker.tar.gz https://github.com/accordeiro/ticker-releases/releases/download/v0.5.3-alpha/ticker.tar.gz tar -xvzf ticker.tar.gz mv ticker /opt/stellar/bin/ticker chmod +x /opt/stellar/bin/ticker diff --git a/exp/ticker/docker/start b/exp/ticker/docker/start index 03cbd7402a..efbb0edde6 100644 
--- a/exp/ticker/docker/start +++ b/exp/ticker/docker/start @@ -28,6 +28,7 @@ function main() { populate_assets populate_trades + populate_orderbooks generate_assets_file generate_markets_file @@ -70,6 +71,19 @@ function populate_trades() { } +function populate_orderbooks() { + if [ -f $PGHOME/.orderbooks-populated ]; then + echo "ticker: orderbooks already pre-populated" + return 0 + fi + echo "" + echo "Populating initial orderbook database" + echo "" + sudo -u stellar $STELLAR_BIN/ticker ingest orderbooks --db-url=$PGURL + touch $PGHOME/.orderbooks-populated +} + + function generate_assets_file() { if [ -f $STELLAR_HOME/www/assets.json ]; then echo "ticker: assets.json already pre-populated" diff --git a/exp/ticker/internal/actions_market.go b/exp/ticker/internal/actions_market.go index f5bc8c5106..ee892d1627 100644 --- a/exp/ticker/internal/actions_market.go +++ b/exp/ticker/internal/actions_market.go @@ -59,6 +59,8 @@ func GenerateMarketSummary(s *tickerdb.TickerSession) (ms MarketSummary, err err func dbMarketToMarketStats(m tickerdb.Market) MarketStats { closeTime := utils.TimeToUnixEpoch(m.LastPriceCloseTime) + + spread, spreadMidPoint := utils.CalcSpread(m.HighestBid, m.LowestAsk) return MarketStats{ TradePairName: m.TradePair, BaseVolume24h: m.BaseVolume24h, @@ -77,6 +79,14 @@ func dbMarketToMarketStats(m tickerdb.Market) MarketStats { Change7d: m.PriceChange7d, Price: m.LastPrice, Close: m.LastPrice, + BidCount: m.NumBids, + BidVolume: m.BidVolume, + BidMax: m.HighestBid, + AskCount: m.NumAsks, + AskVolume: m.AskVolume, + AskMin: m.LowestAsk, + Spread: spread, + SpreadMidPoint: spreadMidPoint, CloseTime: closeTime, } } diff --git a/exp/ticker/internal/actions_orderbook.go b/exp/ticker/internal/actions_orderbook.go new file mode 100644 index 0000000000..3ca66de306 --- /dev/null +++ b/exp/ticker/internal/actions_orderbook.go @@ -0,0 +1,65 @@ +package ticker + +import ( + "time" + + horizonclient "github.com/stellar/go/clients/horizonclient" + 
"github.com/stellar/go/exp/ticker/internal/scraper" + "github.com/stellar/go/exp/ticker/internal/tickerdb" + "github.com/stellar/go/support/errors" + hlog "github.com/stellar/go/support/log" +) + +// RefreshOrderbookEntries updates the orderbook entries for the relevant markets that were active +// in the past 7-day interval +func RefreshOrderbookEntries(s *tickerdb.TickerSession, c *horizonclient.Client, l *hlog.Entry) error { + sc := scraper.ScraperConfig{ + Client: c, + Logger: l, + } + + // Retrieve relevant markets for the past 7 days (168 hours): + mkts, err := s.Retrieve7DRelevantMarkets() + if err != nil { + return errors.Wrap(err, "could not retrieve partial markets") + } + + for _, mkt := range mkts { + ob, err := sc.FetchOrderbookForAssets( + mkt.BaseAssetType, + mkt.BaseAssetCode, + mkt.BaseAssetIssuer, + mkt.CounterAssetType, + mkt.CounterAssetCode, + mkt.CounterAssetIssuer, + ) + if err != nil { + l.Error(errors.Wrap(err, "could not fetch orderbook for assets")) + continue + } + + dbOS := orderbookStatsToDBOrderbookStats(ob, mkt.BaseAssetID, mkt.CounterAssetID) + err = s.InsertOrUpdateOrderbookStats(&dbOS, []string{"base_asset_id", "counter_asset_id"}) + if err != nil { + l.Error(errors.Wrap(err, "could not insert orderbook stats into db")) + } + } + + return nil +} + +func orderbookStatsToDBOrderbookStats(os scraper.OrderbookStats, bID, cID int32) tickerdb.OrderbookStats { + return tickerdb.OrderbookStats{ + BaseAssetID: bID, + CounterAssetID: cID, + NumBids: os.NumBids, + BidVolume: os.BidVolume, + HighestBid: os.HighestBid, + NumAsks: os.NumAsks, + AskVolume: os.AskVolume, + LowestAsk: os.LowestAsk, + Spread: os.Spread, + SpreadMidPoint: os.SpreadMidPoint, + UpdatedAt: time.Now(), + } +} diff --git a/exp/ticker/internal/gql/main.go b/exp/ticker/internal/gql/main.go index c538286922..eb600114a1 100644 --- a/exp/ticker/internal/gql/main.go +++ b/exp/ticker/internal/gql/main.go @@ -38,6 +38,7 @@ type asset struct { Countries string Status string 
IssuerID int32 + OrderbookStats orderbookStats } // partialMarket represents the aggregated market data for a @@ -58,6 +59,20 @@ type partialMarket struct { Close float64 IntervalStart graphql.Time FirstLedgerCloseTime graphql.Time + OrderbookStats orderbookStats +} + +// orderbookStats represents the orderbook stats for a +// specific pair of assets (aggregated or not) +type orderbookStats struct { + BidCount BigInt + BidVolume float64 + BidMax float64 + AskCount BigInt + AskVolume float64 + AskMin float64 + Spread float64 + SpreadMidPoint float64 } type resolver struct { diff --git a/exp/ticker/internal/gql/resolvers_market.go b/exp/ticker/internal/gql/resolvers_market.go index ba666056a8..866ee555da 100644 --- a/exp/ticker/internal/gql/resolvers_market.go +++ b/exp/ticker/internal/gql/resolvers_market.go @@ -5,6 +5,7 @@ import ( "github.com/graph-gophers/graphql-go" "github.com/stellar/go/exp/ticker/internal/tickerdb" + "github.com/stellar/go/exp/ticker/internal/utils" ) // Markets resolves the markets() GraphQL query. 
@@ -83,6 +84,18 @@ func validateNumHoursAgo(n *int32) (int, error) { // dbMarketToPartialMarket converts a tickerdb.PartialMarket to a *partialMarket func dbMarketToPartialMarket(dbMarket tickerdb.PartialMarket) *partialMarket { + spread, spreadMidPoint := utils.CalcSpread(dbMarket.HighestBid, dbMarket.LowestAsk) + os := orderbookStats{ + BidCount: BigInt(dbMarket.NumBids), + BidVolume: dbMarket.BidVolume, + BidMax: dbMarket.HighestBid, + AskCount: BigInt(dbMarket.NumAsks), + AskVolume: dbMarket.AskVolume, + AskMin: dbMarket.LowestAsk, + Spread: spread, + SpreadMidPoint: spreadMidPoint, + } + return &partialMarket{ TradePair: dbMarket.TradePairName, BaseAssetCode: dbMarket.BaseAssetCode, @@ -99,5 +112,6 @@ func dbMarketToPartialMarket(dbMarket tickerdb.PartialMarket) *partialMarket { Close: dbMarket.Close, IntervalStart: graphql.Time{Time: dbMarket.IntervalStart}, FirstLedgerCloseTime: graphql.Time{Time: dbMarket.FirstLedgerCloseTime}, + OrderbookStats: os, } } diff --git a/exp/ticker/internal/gql/static/bindata.go b/exp/ticker/internal/gql/static/bindata.go index 988efc76d9..a593582b36 100644 --- a/exp/ticker/internal/gql/static/bindata.go +++ b/exp/ticker/internal/gql/static/bindata.go @@ -116,7 +116,7 @@ func bindataGraphiqlhtml() (*asset, error) { size: 1182, md5checksum: "", mode: os.FileMode(420), - modTime: time.Unix(1556028957, 0), + modTime: time.Unix(1556057220, 0), } a := &asset{bytes: bytes, info: info} @@ -125,33 +125,36 @@ func bindataGraphiqlhtml() (*asset, error) { } var _bindataSchemagql = []byte( - "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x54\x4d\x6f\xe3\x36\x10\x3d\x4b\xbf\x62\x82\xbd\x24\x17\x1f\x8a\x9e" + - "\x84\xb6\x80\x93\xb4\x68\xd0\xec\xa2\x5d\x67\x8b\x02\x41\x51\x8c\xc5\xb1\x44\x84\x1f\xda\xe1\xd0\x59\xa3\xc8\x7f" + - "\x2f\x48\xc9\x59\xca\x4a\xdd\x73\x4f\x12\xdf\xcc\x1b\x72\x1e\xdf\x30\xb4\x3d\x59\x84\xbf\xeb\xea\x73\x24\x3e\x34" + - 
"\x50\xfd\x96\xbe\xf5\x4b\x5d\xcb\x61\x20\xc8\xab\x14\x7e\x07\x4c\xc2\x9a\xf6\x04\x68\x0c\xec\xd1\x68\x85\x42\x0a" + - "\x30\x04\x92\x00\xde\x81\xf4\x04\x1b\x21\x63\x90\xc1\x91\x3c\x7b\x7e\x5a\xd5\xd5\x18\x6f\xe0\x71\x9d\x7e\x2e\xfe" + - "\xbc\xa8\xcf\x14\xd3\x21\x44\xe2\x33\xd5\xa6\x84\x06\x1e\xef\xf2\xdf\xa2\x9e\x30\x2a\x82\x20\x28\x01\x76\xec\x6d" + - "\xae\x63\x30\x08\x7c\xe7\xa2\xfd\xd9\x47\x0e\xeb\xce\xff\x00\x7d\xfa\x4b\xcc\x4b\x45\x3b\x8c\x46\xe0\x7b\xf8\xe6" + - "\xdb\x11\xbe\x5a\x81\x1f\x44\x7b\x87\xc6\x1c\x60\x60\xbf\xd7\x8a\xa0\xf5\xd1\x09\x31\xa0\x53\x89\xb7\xc5\x40\x63" + - "\xf3\xa0\xdd\xce\xc3\xce\x33\xec\xb4\x11\x62\xed\xba\x55\x5d\x59\xe4\x27\x92\x70\x59\x57\x55\x4a\xcd\xdd\xdf\x78" + - "\x45\x0d\x6c\x24\xa5\x94\xf8\xd8\x4b\x11\x99\xf6\x7a\x8b\x54\x86\x16\xbc\xa2\xc5\x06\xee\x9c\xd4\xd5\x55\x03\x8f" + - "\xef\xf3\x51\x16\xca\x77\x1d\x53\x97\x65\x9f\x89\xe6\xf9\x5f\x34\x4b\xec\xac\xcf\x9b\xf2\x24\x8e\x43\x4b\xe0\x77" + - "\xf9\x7f\xac\x39\xa0\x66\xb8\xa4\x55\x52\xe4\x1d\xfc\x71\xff\xfe\xaf\xeb\x87\x9b\xab\xb9\x58\xc0\x14\xa2\x91\xb0" + - "\xaa\x2b\xd1\xed\x13\x71\xd2\x2c\x11\x3f\xa0\xa5\xff\x6c\x6e\xfd\xda\xc6\x6b\x9b\x2f\x75\x1d\x5a\x4c\xc6\xb9\xd6" + - "\x5d\x4a\x9c\x56\x0f\xda\xd2\xe4\xeb\x2c\x5f\xf2\x75\x5b\xa8\x7b\x71\xf4\xd7\xba\xcd\x2a\x17\x78\x22\x15\x4b\x17" + - "\xed\x94\x13\xf2\x51\x2e\xea\x0a\xa3\xf4\x1f\xe9\x73\xd4\x4c\xaa\x81\x6b\xef\x0d\xa1\x7b\xc5\xf7\xbe\xc5\xad\xa1" + - "\x59\xc0\x8e\x7b\xfc\x64\x3c\xe6\x02\xe3\x65\x3b\x61\x6f\x0c\xa9\xeb\xc3\xad\xb7\xa8\xdd\x8c\xe2\xda\xde\x2f\x5d" + - "\x31\x8f\x3c\xcc\x8f\xaa\x43\x46\xd7\x39\x61\x7e\x34\xa5\xc3\x60\xf0\x70\x4b\xad\xb6\x68\x42\x33\xc9\x95\xfa\x2b" + - "\x94\x4f\x89\x14\xda\x62\xd9\x7a\xa7\x74\x32\x40\x28\xc0\x9d\xfe\x42\xea\x43\xb4\xdb\x64\xc8\xd7\x42\x16\xbf\x2c" + - "\x30\x1d\x3e\x39\xa3\xad\x96\xf9\x69\x98\x14\xd9\xec\xab\x3b\x17\x84\x63\x7b\xba\x43\xeb\x8d\x41\x21\x46\xb3\x56" + - 
"\x8a\x29\x04\x3a\x1b\xdd\xe8\xce\xa1\x44\x3e\xc9\x8a\x2e\xf9\xbf\xc4\x92\xef\x63\x58\x98\xe0\xee\x76\xba\xda\xe3" + - "\x5b\x38\xfa\x2b\x99\x26\x7b\xfb\x57\xd4\x5c\x90\xde\x1c\xf2\x12\x9f\x0f\xeb\xf1\x2c\x6f\x0c\xf9\x49\x68\xc1\x4b" + - "\x15\x7f\xf7\x26\xa6\x2b\x3a\x9a\x67\x22\x9c\xc2\xf9\xa0\x37\xa3\xcf\x46\xf1\xfd\x40\xee\x6b\xdc\xf8\xe7\xaf\x8b" + - "\x5e\x77\x7d\x51\xb1\x47\xd7\x95\x3b\x18\x1f\x8a\xa5\x4e\xdb\xed\xd1\x6c\x04\x59\x9a\x3c\x5a\xd9\x04\x1c\xe4\x9e" + - "\x54\x47\x7c\x93\xf2\x13\x7c\x0c\x1e\x65\x3c\x1d\xd8\x73\x82\xfe\x8f\xdb\x1c\xaf\x2d\x35\x37\xc4\xad\xd1\xed\x2f" + - "\x74\x28\x1f\x90\xf9\x80\x45\x36\xe5\x63\xe3\xad\xf9\xf4\xf1\xbe\x1c\x2e\x52\xc4\x98\x06\x62\x43\xbc\x9f\xb9\x21" + - "\xbd\x2f\x0b\x50\x18\x5d\xd8\x11\x2f\x02\xcf\xb4\x5d\x47\xe9\x7f\x74\x6a\xf0\x7a\xf6\xc2\x29\x1a\x7c\xd0\xb2\x60" + - "\x78\xee\x1e\x9e\xb5\x48\x09\xbe\xd4\xff\x04\x00\x00\xff\xff\x75\xd6\x43\x72\x38\x08\x00\x00") + "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x54\x4f\x6f\xe3\xb6\x13\x3d\x4b\x9f\x62\x82\xbd\x24\x17\x1f\x7e\xf8" + + "\x9d\x84\xb6\x80\x93\xb4\x68\xd0\x78\xbb\x5d\x67\x8b\x02\x41\x51\x8c\xc5\xb1\x3c\x30\x45\x6a\x87\xa4\x13\x63\x91" + + "\xef\x5e\x90\x92\x1d\x4a\x4a\xd3\x7b\x4f\xd2\xfc\x79\xc3\x99\xc7\xc7\x71\xf5\x8e\x5a\x84\x6f\x65\xf1\x35\x90\x1c" + + "\x2b\x28\x7e\x8b\xdf\xf2\xa5\x2c\xfd\xb1\x23\x48\x56\x0c\x7f\x00\x21\x2f\x4c\x07\x02\xd4\x1a\x0e\xa8\x59\xa1\x27" + + "\x05\xe8\x1c\x79\x07\xd6\x80\xdf\x11\xac\x3d\x69\x8d\x02\x86\xfc\x93\x95\xfd\xa2\x2c\xfa\x78\x05\x8f\xcb\xf8\x73" + + "\xf1\xe7\x45\xf9\x4e\x31\x76\x2e\x90\xbc\x53\x6d\x48\xa8\xe0\xf1\x2e\xfd\xcd\xea\x79\x41\x45\xe0\x3c\x7a\x07\x5b" + + "\xb1\x6d\xaa\xa3\xd1\x79\xf8\xce\x84\xf6\x67\x1b\xc4\x2d\x1b\xfb\x03\xec\xe2\x5f\x44\x5e\x2a\xda\x62\xd0\x1e\xbe" + + "\x87\xff\xfd\xbf\x77\x5f\x2d\xc0\x76\x9e\xad\x41\xad\x8f\xd0\x89\x3d\xb0\x22\xa8\x6d\x30\x9e\x04\xd0\xa8\x88\xdb" + + "\xa0\xa3\x7e\x78\x60\xb3\xb5\xb0\xb5\x02\x5b\xd6\x9e\x84\x4d\xb3\x28\x8b\x16\x65\x4f\xde\x5d\x96\x45\x11\x53\xd3" + 
+ "\xf4\x37\x56\x51\x05\x6b\x1f\x53\x72\x7f\x3f\x4b\x16\x19\xce\x7a\x0b\x94\x87\x66\xb8\x6c\xc4\x0a\xee\x8c\x2f\x8b" + + "\xab\x0a\x1e\x57\xa9\x95\x19\xf3\x4d\x23\xd4\x24\xda\x47\xa4\x59\xf9\x07\xce\x22\x3a\xf1\xf3\x26\x3d\x11\x63\xb0" + + "\x25\xb0\xdb\xf4\xdf\xd7\xec\x90\x05\x2e\x69\x11\x19\xf9\x00\x7f\xdc\xaf\xfe\xba\x7e\xb8\xb9\x1a\x93\x05\x42\x2e" + + "\x68\xef\x16\x65\xe1\xb9\xde\x93\x44\xce\x22\xf0\x23\xb6\xf4\xaf\xc3\x2d\xcf\x63\x9c\xc7\x7c\x29\x4b\x57\x63\x14" + + "\xce\x35\x37\x31\x71\xb0\x1e\xb8\xa5\x41\xd7\x89\xbe\xa8\xeb\x3a\x63\xf7\xe2\xa4\xaf\x65\x9d\x58\xce\xfc\x11\x94" + + "\x99\x26\xb4\x43\x8e\x4b\xad\x5c\x94\x05\x06\xbf\xfb\x4c\x5f\x03\x0b\xa9\x0a\xae\xad\xd5\x84\xe6\xec\x3f\xd8\x1a" + + "\x37\x9a\x46\x81\xb6\x3f\xe3\x27\x6d\x31\x15\xe8\x2f\xdb\x78\xb1\x5a\x93\xba\x3e\xde\xda\x16\xd9\x8c\x20\xa6\xde" + + "\xd9\xb9\x2a\xc6\x91\x87\x71\xab\xec\x92\x77\x99\x12\xc6\xad\x29\x76\x9d\xc6\xe3\x2d\xd5\xdc\xa2\x76\xd5\x40\x57" + + "\x9c\x2f\x63\x3e\x26\x92\xab\x33\xb3\xb6\x46\x71\x14\x80\xcb\x9c\x5b\x7e\x26\xf5\x31\xb4\x9b\x28\xc8\x73\xa1\x16" + + "\x9f\x67\x3e\x76\x5f\x8c\xe6\x96\xfd\xb8\x1b\x21\x45\x6d\xd2\xd5\x9d\x71\x5e\x42\x3d\x3d\xa1\xb6\x5a\xa3\x27\x41" + + "\xbd\x54\x4a\xc8\x39\x7a\x37\xba\xe6\xc6\xa0\x0f\x32\xc9\x0a\x26\xea\x3f\xf7\x45\xdd\x07\x37\x13\xc1\xdd\xed\x70" + + "\xb5\xa7\x5d\xd8\xeb\x2b\x8a\x26\x69\xfb\x13\xb2\x64\xa0\x37\x1f\x79\xee\x1f\x3f\xd6\x53\x2f\x6f\x3c\xf2\x49\x68" + + "\x86\x8b\x15\x7f\xb7\x3a\xc4\x2b\x3a\x89\x67\x00\x4c\xdd\xa9\xd1\x9b\x5e\x67\x3d\xf9\xb6\x23\xf3\x1a\xd7\xf6\xe9" + + "\xd5\xd8\x71\xb3\xcb\x2a\xee\xd0\x34\xf9\x09\xda\xba\xcc\xe4\x78\xdc\x01\xf5\xda\xa3\xf8\x2a\x3d\xad\x24\x02\x71" + + "\xfe\x9e\x54\x43\x72\x13\xf3\xa3\xfb\x1c\xb4\xa2\x48\x36\xd6\xee\xd7\x71\xd1\x54\xf0\xeb\xc8\x7e\xe5\x79\xfa\xa2" + + "\xdf\x63\xfc\xbf\xcc\xc3\xd8\x0f\xdf\x4a\x28\x36\xac\x86\x29\xce\xaf\x69\xc3\x6a\x3a\xed\x86\xd5\x0a\x9f\xf3\xcd" + + 
"\xb2\x9f\xa2\xd0\xed\xa7\x28\x74\xfb\x15\x67\x9c\xb8\x4e\x08\xd5\xd4\x5e\xb1\xfa\x64\x39\xdb\x5b\xa7\x6e\x7b\x99" + + "\xc6\xbb\xea\xc2\x46\x73\xfd\x0b\x1d\xf3\x85\x39\x5e\x28\x41\x74\xbe\x5c\x6d\xab\xbf\x7c\xbe\xcf\x97\x09\x29\x12" + + "\x8c\x0b\x60\x4d\x72\x18\xa9\x3f\xee\xd3\x99\xd3\x0b\x1a\xb7\x25\x99\x05\x9e\x68\xb3\x0c\x7e\xf7\xa3\x51\x5d\xdf" + + "\x75\xb6\xd3\x3a\xeb\xd8\xcf\x10\x56\x9a\x87\x27\xf6\x3e\x77\xbe\x94\x7f\x07\x00\x00\xff\xff\x3f\x67\xd7\xeb\x28" + + "\x09\x00\x00") func bindataSchemagqlBytes() ([]byte, error) { return bindataRead( @@ -168,10 +171,10 @@ func bindataSchemagql() (*asset, error) { info := bindataFileInfo{ name: "schema.gql", - size: 2104, + size: 2344, md5checksum: "", mode: os.FileMode(420), - modTime: time.Unix(1556055697, 0), + modTime: time.Unix(1556718973, 0), } a := &asset{bytes: bytes, info: info} diff --git a/exp/ticker/internal/gql/static/schema.gql b/exp/ticker/internal/gql/static/schema.gql index 3b28efa701..86a96ba85e 100644 --- a/exp/ticker/internal/gql/static/schema.gql +++ b/exp/ticker/internal/gql/static/schema.gql @@ -75,6 +75,7 @@ type Market { close: Float! intervalStart: Time! firstLedgerCloseTime: Time! + orderbookStats: OrderbookStats! } type AggregatedMarket { @@ -89,6 +90,18 @@ type AggregatedMarket { close: Float! intervalStart: Time! firstLedgerCloseTime: Time! + orderbookStats: OrderbookStats! +} + +type OrderbookStats { + bidCount: BigInt! + bidVolume: Float! + bidMax: Float! + askCount: BigInt! + askVolume: Float! + askMin: Float! + spread: Float! + spreadMidPoint: Float! 
} type Issuer { diff --git a/exp/ticker/internal/main.go b/exp/ticker/internal/main.go index 58ce1d21fa..096fde4334 100644 --- a/exp/ticker/internal/main.go +++ b/exp/ticker/internal/main.go @@ -32,6 +32,14 @@ type MarketStats struct { Price float64 `json:"price"` Close float64 `json:"close"` CloseTime int64 `json:"close_time"` + BidCount int `json:"bid_count"` + BidVolume float64 `json:"bid_volume"` + BidMax float64 `json:"bid_max"` + AskCount int `json:"ask_count"` + AskVolume float64 `json:"ask_volume"` + AskMin float64 `json:"ask_min"` + Spread float64 `json:"spread"` + SpreadMidPoint float64 `json:"spread_mid_point"` } // Asset Sumary represents the collection of valid assets. diff --git a/exp/ticker/internal/scraper/main.go b/exp/ticker/internal/scraper/main.go index 7a5ce4dd9f..e0fd88fcd1 100644 --- a/exp/ticker/internal/scraper/main.go +++ b/exp/ticker/internal/scraper/main.go @@ -91,6 +91,24 @@ type FinalAsset struct { Status string `json:"status"` } +// OrderbookStats represents the Orderbook stats for a given asset +type OrderbookStats struct { + BaseAssetCode string + BaseAssetType string + BaseAssetIssuer string + CounterAssetCode string + CounterAssetType string + CounterAssetIssuer string + NumBids int + BidVolume float64 + HighestBid float64 + NumAsks int + AskVolume float64 + LowestAsk float64 + Spread float64 + SpreadMidPoint float64 +} + // FetchAllAssets fetches assets from the Horizon public net. If limit = 0, will fetch all assets. 
func (c *ScraperConfig) FetchAllAssets(limit int, parallelism int) (assets []FinalAsset, err error) { dirtyAssets, err := c.retrieveAssets(limit) @@ -127,3 +145,9 @@ func (c *ScraperConfig) StreamNewTrades(cursor string, h horizonclient.TradeHand c.Logger.Info("Starting to stream trades with cursor at:", cursor) return c.streamTrades(h, cursor) } + +// FetchOrderbookForAssets fetches the orderbook stats for the base and counter assets provided in the parameters +func (c *ScraperConfig) FetchOrderbookForAssets(bType, bCode, bIssuer, cType, cCode, cIssuer string) (OrderbookStats, error) { + c.Logger.Infof("Fetching orderbook info for %s:%s / %s:%s\n", bCode, bIssuer, cCode, cIssuer) + return c.fetchOrderbook(bType, bCode, bIssuer, cType, cCode, cIssuer) +} diff --git a/exp/ticker/internal/scraper/orderbook_scraper.go b/exp/ticker/internal/scraper/orderbook_scraper.go new file mode 100644 index 0000000000..e7dcdc8d8a --- /dev/null +++ b/exp/ticker/internal/scraper/orderbook_scraper.go @@ -0,0 +1,119 @@ +package scraper + +import ( + "math" + "strconv" + + "github.com/pkg/errors" + horizonclient "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/exp/ticker/internal/utils" + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// fetchOrderbook fetches the orderbook stats for the base and counter assets provided in the parameters +func (c *ScraperConfig) fetchOrderbook(bType, bCode, bIssuer, cType, cCode, cIssuer string) (OrderbookStats, error) { + obStats := OrderbookStats{ + BaseAssetCode: bType, + BaseAssetType: bCode, + BaseAssetIssuer: bIssuer, + CounterAssetCode: cType, + CounterAssetType: cCode, + CounterAssetIssuer: cIssuer, + HighestBid: math.Inf(-1), // start with -Inf to make sure we catch the correct max bid + LowestAsk: math.Inf(1), // start with +Inf to make sure we catch the correct min ask + } + r := createOrderbookRequest(bType, bCode, bIssuer, cType, cCode, cIssuer) + summary, err := c.Client.OrderBook(r) + if err != nil { + 
return obStats, errors.Wrap(err, "could not fetch orderbook summary") + } + + err = calcOrderbookStats(&obStats, summary) + return obStats, errors.Wrap(err, "could not calculate orderbook stats") +} + +// calcOrderbookStats calculates the NumBids, BidVolume, BidMax, NumAsks, AskVolume and AskMin +// statistics for a given OrdebookStats instance +func calcOrderbookStats(obStats *OrderbookStats, summary hProtocol.OrderBookSummary) error { + // Calculate Bid Data: + obStats.NumBids = len(summary.Bids) + if obStats.NumBids == 0 { + obStats.HighestBid = 0 + } + for _, bid := range summary.Bids { + pricef := float64(bid.PriceR.N) / float64(bid.PriceR.D) + if pricef > obStats.HighestBid { + obStats.HighestBid = pricef + } + + amountf, err := strconv.ParseFloat(bid.Amount, 64) + if err != nil { + return errors.Wrap(err, "invalid bid amount") + } + obStats.BidVolume += amountf + } + + // Calculate Ask Data: + obStats.NumAsks = len(summary.Asks) + if obStats.NumAsks == 0 { + obStats.LowestAsk = 0 + } + for _, ask := range summary.Asks { + pricef := float64(ask.PriceR.N) / float64(ask.PriceR.D) + amountf, err := strconv.ParseFloat(ask.Amount, 64) + if err != nil { + return errors.Wrap(err, "invalid ask amount") + } + + // On Horizon, Ask prices are in units of counter, but + // amount is in units of base. 
Therefore, real amount = amount * price + // See: https://github.com/stellar/go/issues/612 + obStats.AskVolume += pricef * amountf + if pricef < obStats.LowestAsk { + obStats.LowestAsk = pricef + } + } + + obStats.Spread, obStats.SpreadMidPoint = utils.CalcSpread(obStats.HighestBid, obStats.LowestAsk) + + // Clean up remaining infinity values: + if math.IsInf(obStats.LowestAsk, 0) { + obStats.LowestAsk = 0 + } + + if math.IsInf(obStats.HighestBid, 0) { + obStats.HighestBid = 0 + } + + return nil +} + +// createOrderbookRequest generates a horizonclient.OrderBookRequest based on the base +// and counter asset parameters provided +func createOrderbookRequest(bType, bCode, bIssuer, cType, cCode, cIssuer string) horizonclient.OrderBookRequest { + r := horizonclient.OrderBookRequest{ + SellingAssetType: horizonclient.AssetType(bType), + BuyingAssetType: horizonclient.AssetType(cType), + // NOTE (Alex C, 2019-05-02): + // Orderbook requests are currently not paginated on Horizon. + // This limit has been added to ensure we capture at least 200 + // orderbook entries once pagination is added. + Limit: 200, + } + + // The Horizon API requires *AssetCode and *AssetIssuer fields to be empty + // when an Asset is native. As we store "XLM" as the asset code for native, + // we should only add Code and Issuer info in case we're dealing with + // non-native assets. 
+ // See: https://www.stellar.org/developers/horizon/reference/endpoints/orderbook-details.html + if bType != string(horizonclient.AssetTypeNative) { + r.SellingAssetCode = bCode + r.SellingAssetIssuer = bIssuer + } + if cType != string(horizonclient.AssetTypeNative) { + r.BuyingAssetCode = cCode + r.BuyingAssetIssuer = cIssuer + } + + return r +} diff --git a/exp/ticker/internal/tickerdb/helpers.go b/exp/ticker/internal/tickerdb/helpers.go index 34db1d130c..22f8aabf16 100644 --- a/exp/ticker/internal/tickerdb/helpers.go +++ b/exp/ticker/internal/tickerdb/helpers.go @@ -5,6 +5,8 @@ import ( "fmt" "reflect" "strings" + + "github.com/stellar/go/exp/ticker/internal/utils" ) // getDBFieldTags returns all "db" tags for a given struct, optionally excluding the "id". @@ -115,3 +117,21 @@ func getBaseAndCounterCodes(pairName string) (string, string, error) { return assets[0], assets[1], nil } + +// performUpsertQuery introspects a dbStruct interface{} and performs an insert query +// (if the conflictConstraint isn't broken), otherwise it updates the instance on the +// db, preserving the old values for the fields in preserveFields +func (s *TickerSession) performUpsertQuery(dbStruct interface{}, tableName string, conflictConstraint string, preserveFields []string) error { + dbFields := getDBFieldTags(dbStruct, true) + dbFieldsString := strings.Join(dbFields, ", ") + dbValues := getDBFieldValues(dbStruct, true) + + cleanPreservedFields := sanitizeFieldNames(preserveFields) + toUpdateFields := utils.SliceDiff(dbFields, cleanPreservedFields) + + qs := fmt.Sprintf("INSERT INTO %s (", tableName) + dbFieldsString + ")" + qs += " VALUES (" + generatePlaceholders(dbValues) + ")" + qs += " " + createOnConflictFragment(conflictConstraint, toUpdateFields) + ";" + _, err := s.ExecRaw(qs, dbValues...) 
+ return err +} diff --git a/exp/ticker/internal/tickerdb/main.go b/exp/ticker/internal/tickerdb/main.go index 5c156959db..1b2a6569e2 100644 --- a/exp/ticker/internal/tickerdb/main.go +++ b/exp/ticker/internal/tickerdb/main.go @@ -82,6 +82,22 @@ type Trade struct { Price float64 `db:"price"` } +// OrderbookStats represents an entry on the orderbook_stats table +type OrderbookStats struct { + ID int32 `db:"id"` + BaseAssetID int32 `db:"base_asset_id"` + CounterAssetID int32 `db:"counter_asset_id"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` + Spread float64 `db:"spread"` + SpreadMidPoint float64 `db:"spread_mid_point"` + UpdatedAt time.Time `db:"updated_at"` +} + // Market represent the aggregated market data retrieved from the database. // Note: this struct does *not* directly map to a db entity. type Market struct { @@ -102,19 +118,28 @@ type Market struct { PriceChange7d float64 `db:"price_change_7d"` LastPriceCloseTime time.Time `db:"close_time"` LastPrice float64 `db:"last_price"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` } // PartialMarket represents the aggregated market data for a // specific pair of assets (or asset codes) during an arbitrary // time range. +// Note: this struct does *not* directly map to a db entity. 
type PartialMarket struct { TradePairName string `db:"trade_pair_name"` BaseAssetID int32 `db:"base_asset_id"` BaseAssetCode string `db:"base_asset_code"` BaseAssetIssuer string `db:"base_asset_issuer"` + BaseAssetType string `db:"base_asset_type"` CounterAssetID int32 `db:"counter_asset_id"` CounterAssetCode string `db:"counter_asset_code"` CounterAssetIssuer string `db:"counter_asset_issuer"` + CounterAssetType string `db:"counter_asset_type"` BaseVolume float64 `db:"base_volume"` CounterVolume float64 `db:"counter_volume"` TradeCount int32 `db:"trade_count"` @@ -123,6 +148,12 @@ type PartialMarket struct { High float64 `db:"highest_price"` Change float64 `db:"price_change"` Close float64 `db:"last_price"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` IntervalStart time.Time `db:"interval_start"` FirstLedgerCloseTime time.Time `db:"first_ledger_close_time"` } diff --git a/exp/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql b/exp/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql new file mode 100644 index 0000000000..5dba26e32b --- /dev/null +++ b/exp/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql @@ -0,0 +1,26 @@ + +-- +migrate Up +CREATE TABLE orderbook_stats ( + id serial NOT NULL PRIMARY KEY, + + base_asset_id integer REFERENCES assets (id) NOT NULL, + counter_asset_id integer REFERENCES assets (id) NOT NULL, + + num_bids bigint NOT NULL, + bid_volume double precision NOT NULL, + highest_bid double precision NOT NULL, + + num_asks bigint NOT NULL, + ask_volume double precision NOT NULL, + lowest_ask double precision NOT NULL, + + spread double precision NOT NULL, + spread_mid_point double precision NOT NULL, + + updated_at timestamptz NOT NULL +); +ALTER TABLE ONLY public.orderbook_stats + ADD CONSTRAINT 
orderbook_stats_base_counter_asset_key UNIQUE (base_asset_id, counter_asset_id); + +-- +migrate Down +DROP TABLE orderbook_stats; diff --git a/exp/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql b/exp/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql new file mode 100644 index 0000000000..844fc41c74 --- /dev/null +++ b/exp/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql @@ -0,0 +1,20 @@ + +-- +migrate Up +CREATE OR REPLACE VIEW aggregated_orderbook AS + SELECT + concat(bAsset.code, '_', cAsset.code) as trade_pair_name, + bAsset.code as base_asset_code, + cAsset.code as counter_asset_code, + COALESCE(sum(os.num_bids), 0) AS num_bids, + COALESCE(sum(os.bid_volume), 0.0) AS bid_volume, + COALESCE(max(os.highest_bid), 0.0) AS highest_bid, + COALESCE(sum(os.num_asks), 0) AS num_asks, + COALESCE(sum(os.ask_volume), 0.0) AS ask_volume, + COALESCE(min(os.lowest_ask), 0.0) AS lowest_ask + FROM orderbook_stats AS os + JOIN assets AS bAsset ON os.base_asset_id = bAsset.id + JOIN assets AS cAsset on os.counter_asset_id = cAsset.id + GROUP BY trade_pair_name, base_asset_code, counter_asset_code; + +-- +migrate Down +DROP VIEW IF EXISTS aggregated_orderbook; diff --git a/exp/ticker/internal/tickerdb/migrations/bindata.go b/exp/ticker/internal/tickerdb/migrations/bindata.go index d0ab7d9afd..ac5d8054ef 100644 --- a/exp/ticker/internal/tickerdb/migrations/bindata.go +++ b/exp/ticker/internal/tickerdb/migrations/bindata.go @@ -8,6 +8,8 @@ // migrations/20190409172610-rename_assets_desc_description.sql // migrations/20190410094830-add_assets_issuer_account_field.sql // migrations/20190411165735-data_seed_and_indices.sql +// migrations/20190425110313-add_orderbook_stats.sql +// migrations/20190426092321-add_aggregated_orderbook_view.sql package bdata @@ -117,7 +119,7 @@ func bindataMigrations20190404184050initialsql() (*asset, error) { size: 820, md5checksum: "", mode: 
os.FileMode(420), - modTime: time.Unix(1556552251, 0), + modTime: time.Unix(1556565465, 0), } a := &asset{bytes: bytes, info: info} @@ -400,6 +402,87 @@ func bindataMigrations20190411165735dataseedandindicessql() (*asset, error) { return a, nil } +var _bindataMigrations20190425110313addorderbookstatssql = []byte( + "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x92\xcd\x6e\xea\x30\x10\x85\xf7\x7e\x8a\x59\x82\x2e\xdc\x17\x60\x95" + + "\x4b\x7c\x25\xd4\x34\xa1\x26\x59\xb0\xb2\x1c\x3c\x0a\xa3\xfc\x38\xf2\x38\x45\xed\xd3\x57\x44\x15\x15\x69\x0b\x55" + + "\xd7\x3e\xe7\x7c\x23\xf9\x13\xcb\x25\xfc\x69\xa9\xf2\x26\x20\x14\xbd\x58\x2b\x19\xe5\x12\xf2\xe8\x5f\x22\xc1\x79" + + "\x8b\xbe\x74\xae\xd6\x1c\x4c\x60\x98\x09\x00\x00\xb2\xc0\xe8\xc9\x34\x90\x66\x39\xa4\x45\x92\xc0\x56\x6d\x1e\x23" + + "\xb5\x87\x07\xb9\x5f\x88\x31\x54\x1a\x46\x6d\x98\x31\x68\xb2\x40\x5d\xc0\x0a\x3d\x28\xf9\x5f\x2a\x99\xae\xe5\x0e" + + "\xc6\x37\x86\x19\xd9\xf9\x65\x67\x31\x56\x0f\x6e\xe8\x02\xfa\x5f\xb4\xc7\x7a\x37\xb4\xba\x24\xcb\x50\x52\x45\x5d" + + "\x98\x8c\x97\x64\xf5\xb3\x6b\x86\x16\xc1\xba\xa1\x6c\x10\x7a\x8f\x07\x62\x72\xdd\x24\x79\xa4\xea\x88\x1c\xce\x5b" + + "\xb7\xa2\x17\xa6\xe1\xfa\x1b\xa6\xe1\xfa\x87\xcc\xc6\x9d\xce\x48\xc3\xf5\x5d\x24\xf7\x1e\xcd\xcd\xcb\x3e\x52\xba" + + "\x25\xab\x7b\x77\xbe\xec\xde\xec\xd0\x5b\x13\xd0\x6a\x13\x20\x50\x8b\x1c\x4c\xdb\x87\xd7\x4b\x4a\xcc\x57\x22\x4a" + + "\x72\xa9\xde\x0d\xc9\xd2\x64\x0f\xfd\x50\x36\x74\xf8\x3b\xb1\x65\x9c\x8b\xe2\x18\xd6\x59\xba\xcb\x55\xb4\x49\xf3" + + "\xa9\x50\x7a\xb4\xe4\xfa\xbf\x6b\x7c\x81\x22\xdd\x3c\x15\x12\x66\x57\x12\x2d\x3e\x89\x31\x5f\x89\x2b\x7d\x63\x77" + + "\xea\x44\xac\xb2\xed\xd7\xfa\xae\xc4\x5b\x00\x00\x00\xff\xff\x06\x01\x94\xcd\xed\x02\x00\x00") + +func bindataMigrations20190425110313addorderbookstatssqlBytes() ([]byte, error) { + return bindataRead( + _bindataMigrations20190425110313addorderbookstatssql, + "migrations/20190425110313-add_orderbook_stats.sql", + ) +} + +func 
bindataMigrations20190425110313addorderbookstatssql() (*asset, error) { + bytes, err := bindataMigrations20190425110313addorderbookstatssqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{ + name: "migrations/20190425110313-add_orderbook_stats.sql", + size: 749, + md5checksum: "", + mode: os.FileMode(420), + modTime: time.Unix(1556565462, 0), + } + + a := &asset{bytes: bytes, info: info} + + return a, nil +} + +var _bindataMigrations20190426092321addaggregatedorderbookviewsql = []byte( + "\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x93\xcf\x6e\xb2\x40\x14\xc5\xf7\x3c\xc5\xdd\xa9\xf9\xd0\x7c\x7b\xd3" + + "\x05\xc5\xb1\xa1\xb1\x62\x40\xfb\x67\x35\xb9\x30\x13\x9c\x20\x8c\xe1\x0e\xb5\x8f\xdf\x0c\x44\x81\x88\x65\x45\x0e" + + "\xf7\x77\x39\x99\x73\xc6\x99\xcf\xe1\x5f\xa1\xb2\x0a\x8d\x84\xc3\xd9\xf1\x23\xe6\xed\x19\x84\x11\x44\x6c\xb7\xf1" + + "\x7c\x06\xef\x01\xfb\x00\xcc\xb2\x4a\x66\x68\xa4\xe0\xba\x12\xb2\x4a\xb4\xce\xc1\x8b\x1d\x00\x80\x98\x6d\x98\xbf" + + "\x6f\x5e\xed\x93\xea\x32\x45\x33\x4d\x3c\x22\x69\x16\xa9\x16\xd2\x85\x09\x9f\xb8\x90\x76\xca\x0c\x90\xc0\x54\x28" + + "\x24\x3f\xa3\xaa\x78\x89\x85\x74\x6f\x1b\x7a\xa8\x9d\x4b\x90\x24\x47\x2b\xf1\x66\x5b\xf7\xa7\xe1\x5c\xaa\xeb\xd2" + + "\xc8\x6a\x74\xd4\x0f\xbd\x0d\x8b\x7d\x36\xa5\xba\x98\x6a\x5a\x94\x75\xc1\x13\x25\x68\xe6\xc2\xff\x19\x78\x31\x5c" + + "\x85\xc7\x48\xa2\x04\xff\xd6\xa7\xba\x90\x16\x5a\xb4\x58\x27\x8e\x80\x05\xfe\x58\xf0\xa8\xb2\xa3\x24\x63\xd7\xf7" + + "\xc8\x9e\xfa\xb7\x4d\xa4\x7c\x68\xd3\x0a\x8f\x11\xa4\xfc\xde\x66\x27\x8e\xd9\x54\xa5\x05\x4f\xfa\x62\xfd\x20\xe5" + + "\x3d\xb0\x13\x1b\x6e\x1d\x85\x6f\x70\x2b\x00\x27\x83\x86\xec\x98\xa6\xe6\xf3\x6b\x18\x6c\xa1\x39\xfe\x46\x6d\x73" + + "\x84\x70\x0b\xf6\xf8\xba\x14\x95\x80\xa7\x6b\xc8\x4a\x8c\x91\x6d\xb2\xa0\x4b\x4b\x0e\x73\x6d\xe0\x74\x00\xbf\x44" + + "\xe1\x61\x07\xcf\x5f\x77\x8d\xba\xab\xce\x48\x47\x96\xce\xe0\x0a\xac\xf4\xa5\x74\x56\x51\xb8\x6b\x7b\x1f\xac\x81" + + 
"\x7d\x06\xf1\x3e\x1e\xbd\x01\x4b\xe7\x37\x00\x00\xff\xff\x72\xc3\x7e\xff\x3f\x03\x00\x00") + +func bindataMigrations20190426092321addaggregatedorderbookviewsqlBytes() ([]byte, error) { + return bindataRead( + _bindataMigrations20190426092321addaggregatedorderbookviewsql, + "migrations/20190426092321-add_aggregated_orderbook_view.sql", + ) +} + +func bindataMigrations20190426092321addaggregatedorderbookviewsql() (*asset, error) { + bytes, err := bindataMigrations20190426092321addaggregatedorderbookviewsqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{ + name: "migrations/20190426092321-add_aggregated_orderbook_view.sql", + size: 831, + md5checksum: "", + mode: os.FileMode(420), + modTime: time.Unix(1556565462, 0), + } + + a := &asset{bytes: bytes, info: info} + + return a, nil +} + // // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or @@ -471,6 +554,8 @@ var _bindata = map[string]func() (*asset, error){ "migrations/20190409172610-rename_assets_desc_description.sql": bindataMigrations20190409172610renameassetsdescdescriptionsql, "migrations/20190410094830-add_assets_issuer_account_field.sql": bindataMigrations20190410094830addassetsissueraccountfieldsql, "migrations/20190411165735-data_seed_and_indices.sql": bindataMigrations20190411165735dataseedandindicessql, + "migrations/20190425110313-add_orderbook_stats.sql": bindataMigrations20190425110313addorderbookstatssql, + "migrations/20190426092321-add_aggregated_orderbook_view.sql": bindataMigrations20190426092321addaggregatedorderbookviewsql, } // @@ -533,6 +618,8 @@ var _bintree = &bintree{Func: nil, Children: map[string]*bintree{ "20190409172610-rename_assets_desc_description.sql": {Func: bindataMigrations20190409172610renameassetsdescdescriptionsql, Children: map[string]*bintree{}}, "20190410094830-add_assets_issuer_account_field.sql": {Func: bindataMigrations20190410094830addassetsissueraccountfieldsql, Children: 
map[string]*bintree{}}, "20190411165735-data_seed_and_indices.sql": {Func: bindataMigrations20190411165735dataseedandindicessql, Children: map[string]*bintree{}}, + "20190425110313-add_orderbook_stats.sql": {Func: bindataMigrations20190425110313addorderbookstatssql, Children: map[string]*bintree{}}, + "20190426092321-add_aggregated_orderbook_view.sql": {Func: bindataMigrations20190426092321addaggregatedorderbookviewsql, Children: map[string]*bintree{}}, }}, }} diff --git a/exp/ticker/internal/tickerdb/queries_asset.go b/exp/ticker/internal/tickerdb/queries_asset.go index 78c1d6ed19..aed494f374 100644 --- a/exp/ticker/internal/tickerdb/queries_asset.go +++ b/exp/ticker/internal/tickerdb/queries_asset.go @@ -1,33 +1,9 @@ package tickerdb -import ( - "strings" - - "github.com/stellar/go/exp/ticker/internal/utils" -) - -// InsertAsset inserts a new Asset into the database -func (s *TickerSession) InsertAsset(a *Asset) (err error) { - tbl := s.GetTable("assets") - _, err = tbl.Insert(a).IgnoreCols("id").Exec() - return -} - // InsertOrUpdateAsset inserts an Asset on the database (if new), // or updates an existing one func (s *TickerSession) InsertOrUpdateAsset(a *Asset, preserveFields []string) (err error) { - dbFields := getDBFieldTags(*a, true) - dbFieldsString := strings.Join(dbFields, ", ") - dbValues := getDBFieldValues(*a, true) - - cleanPreservedFields := sanitizeFieldNames(preserveFields) - toUpdateFields := utils.SliceDiff(dbFields, cleanPreservedFields) - - qs := "INSERT INTO assets (" + dbFieldsString + ")" - qs += " VALUES (" + generatePlaceholders(dbValues) + ")" - qs += " " + createOnConflictFragment("assets_code_issuer_account", toUpdateFields) + ";" - _, err = s.ExecRaw(qs, dbValues...) 
- return + return s.performUpsertQuery(*a, "assets", "assets_code_issuer_account", preserveFields) } // GetAssetByCodeAndIssuerAccount searches for an Asset with the given code diff --git a/exp/ticker/internal/tickerdb/queries_market.go b/exp/ticker/internal/tickerdb/queries_market.go index ac749d8e87..72a5cfcaaf 100644 --- a/exp/ticker/internal/tickerdb/queries_market.go +++ b/exp/ticker/internal/tickerdb/queries_market.go @@ -92,6 +92,23 @@ func (s *TickerSession) RetrievePartialMarkets( return } +// Retrieve7DRelevantMarkets retrieves the base and counter asset data of the markets +// that were relevant in the last 7-day period. +func (s *TickerSession) Retrieve7DRelevantMarkets() (partialMkts []PartialMarket, err error) { + q := ` + SELECT + ba.id as base_asset_id, ba.type AS base_asset_type, ba.code AS base_asset_code, ba.issuer_account AS base_asset_issuer, + ca.id as counter_asset_id, ca.type AS counter_asset_type, ca.code AS counter_asset_code, ca.issuer_account AS counter_asset_issuer + FROM trades as t + JOIN assets AS ba ON t.base_asset_id = ba.id + JOIN assets AS ca ON t.counter_asset_id = ca.id + WHERE ba.is_valid = TRUE AND ca.is_valid = TRUE AND t.ledger_close_time > now() - interval '7 days' + GROUP BY ba.id, ba.type, ba.code, ba.issuer_account, ca.id, ca.type, ca.code, ca.issuer_account + ` + err = s.SelectRaw(&partialMkts, q) + return +} + var marketQuery = ` SELECT t2.trade_pair_name, @@ -112,7 +129,14 @@ SELECT COALESCE(open_price_7d, 0.0) as open_price_7d, COALESCE(last_price, 0.0) as last_price, - COALESCE(last_close_time, now()) as close_time + COALESCE(last_close_time_24h, last_close_time_7d) as close_time, + + COALESCE(os.num_bids, 0) as num_bids, + COALESCE(os.bid_volume, 0.0) as bid_volume, + COALESCE(os.highest_bid, 0.0) as highest_bid, + COALESCE(os.num_asks, 0) as num_asks, + COALESCE(os.ask_volume, 0.0) as ask_volume, + COALESCE(os.lowest_ask, 0.0) as lowest_ask FROM ( SELECT -- All valid trades for 24h period @@ -125,7 +149,7 @@ FROM 
( (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price_24h, (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change_24h, - max(t.ledger_close_time) AS last_close_time + max(t.ledger_close_time) AS last_close_time_24h FROM trades AS t JOIN assets AS bAsset ON t.base_asset_id = bAsset.id JOIN assets AS cAsset on t.counter_asset_id = cAsset.id @@ -143,24 +167,32 @@ FROM ( max(t.price) AS highest_price_7d, min(t.price) AS lowest_price_7d, (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price_7d, - ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change_7d + ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change_7d, + max(t.ledger_close_time) AS last_close_time_7d FROM trades AS t + LEFT JOIN orderbook_stats AS os + ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id JOIN assets AS bAsset ON t.base_asset_id = bAsset.id JOIN assets AS cAsset on t.counter_asset_id = cAsset.id WHERE bAsset.is_valid = TRUE AND cAsset.is_valid = TRUE AND t.ledger_close_time > now() - interval '7 days' GROUP BY trade_pair_name - ) t2 ON t1.trade_pair_name = t2.trade_pair_name; + ) t2 ON t1.trade_pair_name = t2.trade_pair_name + LEFT JOIN aggregated_orderbook AS os ON t2.trade_pair_name = os.trade_pair_name; ` var partialMarketQuery = ` SELECT concat(bAsset.code, ':', bAsset.issuer_account, ' / ', cAsset.code, ':', cAsset.issuer_account) as trade_pair_name, + bAsset.id AS base_asset_id, bAsset.code AS base_asset_code, bAsset.issuer_account as base_asset_issuer, + bAsset.type as base_asset_type, + cAsset.id AS counter_asset_id, cAsset.code AS counter_asset_code, cAsset.issuer_account AS counter_asset_issuer, + cAsset.type as 
counter_asset_type, sum(t.base_amount) AS base_volume, sum(t.counter_amount) AS counter_volume, count(t.base_amount) AS trade_count, @@ -170,29 +202,59 @@ SELECT (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change, (now() - interval '__NUMHOURS__ hours') AS interval_start, - min(t.ledger_close_time) AS first_ledger_close_time + min(t.ledger_close_time) AS first_ledger_close_time, + COALESCE((array_agg(os.num_bids))[1], 0) AS num_bids, + COALESCE((array_agg(os.bid_volume))[1], 0.0) AS bid_volume, + COALESCE((array_agg(os.highest_bid))[1], 0.0) AS highest_bid, + COALESCE((array_agg(os.num_asks))[1], 0) AS num_asks, + COALESCE((array_agg(os.ask_volume))[1], 0.0) AS ask_volume, + COALESCE((array_agg(os.lowest_ask))[1], 0.0) AS lowest_ask FROM trades AS t + LEFT JOIN orderbook_stats AS os ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id JOIN assets AS bAsset ON t.base_asset_id = bAsset.id JOIN assets AS cAsset on t.counter_asset_id = cAsset.id __WHERECLAUSE__ -GROUP BY bAsset.code, bAsset.issuer_account, cAsset.code, cAsset.issuer_account; +GROUP BY bAsset.id, bAsset.code, bAsset.issuer_account, bAsset.type, cAsset.id, cAsset.code, cAsset.issuer_account, cAsset.type; ` var aggMarketQuery = ` SELECT - concat(bAsset.code, '_', cAsset.code) as trade_pair_name, - sum(t.base_amount) AS base_volume, - sum(t.counter_amount) AS counter_volume, - count(t.base_amount) AS trade_count, - max(t.price) AS highest_price, - min(t.price) AS lowest_price, - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price, - (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, - ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change, - (now() - interval '__NUMHOURS__ hours') AS interval_start, - 
min(t.ledger_close_time) AS first_ledger_close_time -FROM trades AS t - JOIN assets AS bAsset ON t.base_asset_id = bAsset.id - JOIN assets AS cAsset on t.counter_asset_id = cAsset.id -__WHERECLAUSE__ -GROUP BY trade_pair_name;` + t1.trade_pair_name, + t1.base_volume, + t1.counter_volume, + t1.trade_count, + t1.highest_price, + t1.lowest_price, + t1.open_price, + t1.last_price, + t1.price_change, + t1.interval_start, + t1.first_ledger_close_time, + COALESCE(aob.base_asset_code, '') as base_asset_code, + COALESCE(aob.counter_asset_code, '') as counter_asset_code, + COALESCE(aob.num_bids, 0) AS num_bids, + COALESCE(aob.bid_volume, 0.0) AS bid_volume, + COALESCE(aob.highest_bid, 0.0) AS highest_bid, + COALESCE(aob.num_asks, 0) AS num_asks, + COALESCE(aob.ask_volume, 0.0) AS ask_volume, + COALESCE(aob.lowest_ask, 0.0) AS lowest_ask +FROM ( + SELECT + concat(bAsset.code, '_', cAsset.code) as trade_pair_name, + sum(t.base_amount) AS base_volume, + sum(t.counter_amount) AS counter_volume, + count(t.base_amount) AS trade_count, + max(t.price) AS highest_price, + min(t.price) AS lowest_price, + (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price, + (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, + ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change, + (now() - interval '__NUMHOURS__ hours') AS interval_start, + min(t.ledger_close_time) AS first_ledger_close_time + FROM trades AS t + LEFT JOIN orderbook_stats AS os ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id + JOIN assets AS bAsset ON t.base_asset_id = bAsset.id + JOIN assets AS cAsset on t.counter_asset_id = cAsset.id + __WHERECLAUSE__ + GROUP BY trade_pair_name +) t1 LEFT JOIN aggregated_orderbook AS aob ON t1.trade_pair_name = aob.trade_pair_name;` diff --git a/exp/ticker/internal/tickerdb/queries_market_test.go 
b/exp/ticker/internal/tickerdb/queries_market_test.go index b48352e4dc..13c708dca4 100644 --- a/exp/ticker/internal/tickerdb/queries_market_test.go +++ b/exp/ticker/internal/tickerdb/queries_market_test.go @@ -154,6 +154,65 @@ func TestRetrieveMarketData(t *testing.T) { err = session.BulkInsertTrades(trades) require.NoError(t, err) + // Adding some orderbook stats: + obTime := time.Now() + orderbookStats := OrderbookStats{ + BaseAssetID: xlmAsset.ID, + CounterAssetID: ethAsset.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH1 OrderbookStats + err = session.GetRaw(&obBTCETH1, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + orderbookStats = OrderbookStats{ + BaseAssetID: xlmAsset.ID, + CounterAssetID: btcAsset.ID, + NumBids: 1, + BidVolume: 0.1, + HighestBid: 20.0, + NumAsks: 1, + AskVolume: 15.0, + LowestAsk: 0.2, + Spread: 0.96, + SpreadMidPoint: 0.36, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH2 OrderbookStats + err = session.GetRaw(&obBTCETH2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + assert.NotEqual(t, obBTCETH1.ID, obBTCETH2.ID) + markets, err := session.RetrieveMarketData() require.NoError(t, err) fmt.Println(markets) @@ -233,6 +292,21 @@ func TestRetrieveMarketData(t *testing.T) { assert.True(t, priceChange7dDiff < 0.0000000000001) assert.Equal(t, priceChange24hDiff, priceChange7dDiff) + + // Analysing aggregated orderbook data: + assert.Equal(t, 15, xlmethMkt.NumBids) + assert.Equal(t, 0.15, xlmethMkt.BidVolume) + 
assert.Equal(t, 200.0, xlmethMkt.HighestBid) + assert.Equal(t, 17, xlmethMkt.NumAsks) + assert.Equal(t, 30.0, xlmethMkt.AskVolume) + assert.Equal(t, 0.1, xlmethMkt.LowestAsk) + + assert.Equal(t, 1, xlmbtcMkt.NumBids) + assert.Equal(t, 0.1, xlmbtcMkt.BidVolume) + assert.Equal(t, 20.0, xlmbtcMkt.HighestBid) + assert.Equal(t, 1, xlmbtcMkt.NumAsks) + assert.Equal(t, 15.0, xlmbtcMkt.AskVolume) + assert.Equal(t, 0.2, xlmbtcMkt.LowestAsk) } func TestRetrievePartialMarkets(t *testing.T) { @@ -389,6 +463,65 @@ func TestRetrievePartialMarkets(t *testing.T) { err = session.BulkInsertTrades(trades) require.NoError(t, err) + // Adding some orderbook stats: + obTime := time.Now() + orderbookStats := OrderbookStats{ + BaseAssetID: btcAsset.ID, + CounterAssetID: ethAsset1.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH1 OrderbookStats + err = session.GetRaw(&obBTCETH1, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + orderbookStats = OrderbookStats{ + BaseAssetID: btcAsset.ID, + CounterAssetID: ethAsset2.ID, + NumBids: 1, + BidVolume: 0.1, + HighestBid: 20.0, + NumAsks: 1, + AskVolume: 15.0, + LowestAsk: 0.2, + Spread: 0.96, + SpreadMidPoint: 0.36, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH2 OrderbookStats + err = session.GetRaw(&obBTCETH2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + assert.NotEqual(t, obBTCETH1.ID, obBTCETH2.ID) + partialMkts, err := session.RetrievePartialMarkets( nil, nil, nil, nil, 12, ) @@ -441,6 +574,21 @@ func 
TestRetrievePartialMarkets(t *testing.T) { btceth2Mkt.FirstLedgerCloseTime.Local().Truncate(time.Millisecond), ) + // Analyzing non-aggregated orderbook data + assert.Equal(t, 15, btceth1Mkt.NumBids) + assert.Equal(t, 0.15, btceth1Mkt.BidVolume) + assert.Equal(t, 200.0, btceth1Mkt.HighestBid) + assert.Equal(t, 17, btceth1Mkt.NumAsks) + assert.Equal(t, 30.0, btceth1Mkt.AskVolume) + assert.Equal(t, 0.1, btceth1Mkt.LowestAsk) + + assert.Equal(t, 1, btceth2Mkt.NumBids) + assert.Equal(t, 0.1, btceth2Mkt.BidVolume) + assert.Equal(t, 20.0, btceth2Mkt.HighestBid) + assert.Equal(t, 1, btceth2Mkt.NumAsks) + assert.Equal(t, 15.0, btceth2Mkt.AskVolume) + assert.Equal(t, 0.2, btceth2Mkt.LowestAsk) + + // Now let's use the same data, but aggregating by asset pair partialAggMkts, err := session.RetrievePartialAggMarkets(nil, 12) require.NoError(t, err) @@ -475,4 +623,12 @@ func TestRetrievePartialMarkets(t *testing.T) { require.NoError(t, err) assert.Equal(t, 1, len(partialAggMkts)) assert.Equal(t, int32(3), partialAggMkts[0].TradeCount) + + // Analyzing aggregated orderbook data: + assert.Equal(t, 16, partialAggMkt.NumBids) + assert.Equal(t, 0.25, partialAggMkt.BidVolume) + assert.Equal(t, 200.0, partialAggMkt.HighestBid) + assert.Equal(t, 18, partialAggMkt.NumAsks) + assert.Equal(t, 45.0, partialAggMkt.AskVolume) + assert.Equal(t, 0.1, partialAggMkt.LowestAsk) } diff --git a/exp/ticker/internal/tickerdb/queries_orderbook.go b/exp/ticker/internal/tickerdb/queries_orderbook.go new file mode 100644 index 0000000000..e31dfee1c9 --- /dev/null +++ b/exp/ticker/internal/tickerdb/queries_orderbook.go @@ -0,0 +1,7 @@ +package tickerdb + +// InsertOrUpdateOrderbookStats inserts an OrderbookStats entry on the database (if new), +// or updates an existing one +func (s *TickerSession) InsertOrUpdateOrderbookStats(o *OrderbookStats, preserveFields []string) (err error) { + return s.performUpsertQuery(*o, "orderbook_stats", "orderbook_stats_base_counter_asset_key", preserveFields) +} diff --git 
a/exp/ticker/internal/tickerdb/queries_orderbook_test.go b/exp/ticker/internal/tickerdb/queries_orderbook_test.go new file mode 100644 index 0000000000..bc85230656 --- /dev/null +++ b/exp/ticker/internal/tickerdb/queries_orderbook_test.go @@ -0,0 +1,195 @@ +package tickerdb + +import ( + "testing" + "time" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsertOrUpdateOrderbokStats(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + publicKey := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + issuerAccount := "AM2FQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + name := "FOO BAR" + code := "XLM" + + // Adding a seed issuer to be used later: + issuer := Issuer{ + PublicKey: publicKey, + Name: name, + } + tbl := session.GetTable("issuers") + _, err = tbl.Insert(issuer).IgnoreCols("id").Exec() + require.NoError(t, err) + var dbIssuer Issuer + err = session.GetRaw(&dbIssuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating first asset: + firstTime := time.Now() + a := Asset{ + Code: code, + IssuerAccount: issuerAccount, + IssuerID: dbIssuer.ID, + LastValid: firstTime, + LastChecked: firstTime, + } + err = session.InsertOrUpdateAsset(&a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset1 Asset + err = session.GetRaw(&dbAsset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, code, dbAsset1.Code) + 
assert.Equal(t, issuerAccount, dbAsset1.IssuerAccount) + assert.Equal(t, dbIssuer.ID, dbAsset1.IssuerID) + assert.Equal( + t, + firstTime.Local().Truncate(time.Millisecond), + dbAsset1.LastValid.Local().Truncate(time.Millisecond), + ) + assert.Equal( + t, + firstTime.Local().Truncate(time.Millisecond), + dbAsset1.LastChecked.Local().Truncate(time.Millisecond), + ) + + // Creating Second Asset: + secondTime := time.Now() + a.LastValid = secondTime + a.LastChecked = secondTime + err = session.InsertOrUpdateAsset(&a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset2 Asset + err = session.GetRaw(&dbAsset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating an orderbook_stats entry: + obTime := time.Now() + orderbookStats := OrderbookStats{ + BaseAssetID: dbAsset1.ID, + CounterAssetID: dbAsset2.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var dbOS OrderbookStats + err = session.GetRaw(&dbOS, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, dbAsset1.ID, dbOS.BaseAssetID) + assert.Equal(t, dbAsset2.ID, dbOS.CounterAssetID) + assert.Equal(t, 15, dbOS.NumBids) + assert.Equal(t, 0.15, dbOS.BidVolume) + assert.Equal(t, 200.0, dbOS.HighestBid) + assert.Equal(t, 17, dbOS.NumAsks) + assert.Equal(t, 30.0, dbOS.AskVolume) + assert.Equal(t, 0.1, dbOS.LowestAsk) + assert.Equal(t, 0.93, dbOS.Spread) + assert.Equal(t, 0.35, dbOS.SpreadMidPoint) + assert.Equal( + t, + obTime.Local().Truncate(time.Millisecond), + dbOS.UpdatedAt.Local().Truncate(time.Millisecond), + ) + + // Making sure we're upserting: + obTime2 := time.Now() + orderbookStats2 := 
OrderbookStats{ + BaseAssetID: dbAsset1.ID, + CounterAssetID: dbAsset2.ID, + NumBids: 30, + BidVolume: 0.3, + HighestBid: 400.0, + NumAsks: 34, + AskVolume: 60.0, + LowestAsk: 0.2, + Spread: 1.86, + SpreadMidPoint: 0.7, + UpdatedAt: obTime2, + } + err = session.InsertOrUpdateOrderbookStats( + &orderbookStats2, + []string{"base_asset_id", "counter_asset_id", "lowest_ask"}, + ) + require.NoError(t, err) + + var dbOS2 OrderbookStats + err = session.GetRaw(&dbOS2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, dbOS2.ID, dbOS.ID) // shouldn't create another instance + + assert.Equal(t, dbAsset1.ID, dbOS2.BaseAssetID) + assert.Equal(t, dbAsset2.ID, dbOS2.CounterAssetID) + assert.Equal(t, 30, dbOS2.NumBids) + assert.Equal(t, 0.3, dbOS2.BidVolume) + assert.Equal(t, 400.0, dbOS2.HighestBid) + assert.Equal(t, 34, dbOS2.NumAsks) + assert.Equal(t, 60.0, dbOS2.AskVolume) + assert.Equal(t, 0.1, dbOS2.LowestAsk) // should keep the old value, since on preserveFields + assert.Equal(t, 1.86, dbOS2.Spread) + assert.Equal(t, 0.7, dbOS2.SpreadMidPoint) + assert.Equal( + t, + obTime2.Local().Truncate(time.Millisecond), + dbOS2.UpdatedAt.Local().Truncate(time.Millisecond), + ) +} diff --git a/exp/ticker/internal/utils/main.go b/exp/ticker/internal/utils/main.go index 348980b0c2..d29942ce7f 100644 --- a/exp/ticker/internal/utils/main.go +++ b/exp/ticker/internal/utils/main.go @@ -58,3 +58,13 @@ func GetAssetString(assetType string, code string, issuer string) string { func TimeToUnixEpoch(t time.Time) int64 { return t.UnixNano() / 1000000 } + +// CalcSpread calculates the spread stats for the given bidMax and askMin orderbook values +func CalcSpread(bidMax float64, askMin float64) (spread float64, midPoint float64) { + if askMin == 0 || bidMax == 0 { + return 0, 0 + } + spread = (askMin - bidMax) / askMin + midPoint = bidMax + spread/2.0 + return +}