From e5993afc0c8964485b63dd7de3ec246ca1cec705 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 22 Aug 2022 17:10:52 -0300 Subject: [PATCH 01/42] change(checkpoints): Update block hash checkpoints for mainnet (#4919) --- .../src/checkpoint/main-checkpoints.txt | 641 ++++++++++++++++++ 1 file changed, 641 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 616b67e4426..7da6a804a8d 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -4986,3 +4986,644 @@ 1751364 000000000031da480502e8a9c5683253255651a8e4afcae694f2da88550f6dab 1751447 0000000000166276995b9996332528c5d5fccc7a3ac43a0673f860d79e0d1ee0 1751518 000000000003ca14e1f3b663d4bcf7aab3d645c3c8aa8b8d561f616ba070a36d +1751609 0000000000800d95f6f7c48b18b428b95be1b8fc0a6c079095f70abd0fcdacbb +1751689 0000000001480f0eed4c1e7fbedf342376bde415248a4ff16d59c731b33d08ef +1751742 0000000000cb236645779fbadecdb3df3b0669b7ca759596c8e417d1ec83ee54 +1751793 0000000000b6ca448dc2fa6c3bf3a60df69f36b6c0c54f92800f7d8e50531e95 +1751833 0000000001ab6f03b84ea59be7fa2262c2139ff5e5d0c4f34d6f25f8d0790e59 +1751883 0000000001767fc3a34e6e5ae3537d0a4e9fdfee90e181aff50ca3fcf3cbae4e +1751920 000000000009231d7fe98bc7e82bd907a70315b32e21713d0ce6f51e7ffc92c0 +1751953 0000000000a93bc4fd5cc4277862a34b0dca63ba0f62fc118a02ca12886fa9dc +1751985 00000000007caa4d6b7b20f6ba819f492c93d871360805d9fde09cec2212eaba +1752038 0000000001a1c5b0de9631dae3b042332fde3dee9ecc6d17c7ffafdbffc26a67 +1752078 0000000001833abfa8c01c32784ac7bb219d6874969f3fb549ab4299a91bff3c +1752121 0000000001c3a27f112f2fd11a95cecb608454d44287e44b9c6033895a617764 +1752159 0000000000b327a70b7d2fdb090112d8845486ff55faeeba7be92ad22a9eb61e +1752196 0000000000e9f8f7b533785cc3219986f80904e94962542fcd7057056d3df709 +1752229 0000000001d3d2328555dfd046c12a3e6259cc56191efaf3176c548ce83f9c55 +1752277 0000000000cefd7d05f9ae37b2ce122eed8a40d5d6f66a352c0bf9d4636dd2f7 +1752309 0000000000608009fc98ab2d1d5fdd9fbc7f5105f0aa3329c08031e8b689783a +1752349 000000000174f19b99eba020c5690e6fc3712b65efe08d06f3e7bba41450c1fd +1752377 00000000010f4913ac1dd940692071c2d658064643f60e5b10c5131753582ec2 +1752413 0000000001825c783f7ad5367b6121d96295a539506d5d374d049ee4d02c83c8 +1752457 0000000001696da673e3d71e3037d610d1ec7de9fc6423077e332129cc754ddd +1752498 0000000000485a623817ebc19922b093f3afbe67940aa39de6f8c8e8af99e4c8 +1752530 0000000000e1b526df7cff1aa0ba7f193b18f58ae393f45b98a49e40eae47bea +1752566 000000000121ff4eb5f0274a3ea83678a0601a95ac2812f1bd1febb355ead78a +1752604 000000000031185ee2ab26a64c0f3cdd787b4480a7d0337b02f629dbf303f365 +1752635 0000000000ba52e7d7c4c6eec18f75d2465c84816300d3ed708b3089cb2f46c3 +1752671 0000000001d333e18ae681a225e0661ab5b89b0f560c8379b56fabc252575624 +1752711 000000000150719e03d8c5345d7f98077fceae784604a4dc67a8f900cd15d699 +1752753 00000000006bb9d84f6b2fcc463ed03a51038eaa3e9e897e1c91691c53f139fb +1752790 00000000004990f7ff328bb9212ede946319fdba913fcca3aa9fca0c40c44cab +1752823 00000000018c7beb65698b97d95094c4d49dc8002864f4be5121dfd53ff552dd +1752865 00000000011fc821ae2ecc7e8ccfc79d2fba75f844b02e7fe3a6aa20545a56f8 +1752904 0000000000ba5ccdf54a0e52f3000624ac363cb1dd24e546a324f63359f542bd +1752938 00000000008882709322a733f923b8ebf9c8333476b72f4c2c9decb093af0169 +1752974 00000000010e25b203772884007aeb56932e36dd32368b5be5d6640d2426c6bb +1753015 0000000000278f4cd6aa299e971e011a632868b263159951e8a159c3b93ad581 +1753046 
000000000175918319fd67d12b88d401057ad00b0b4c83692c6e920fddc3a1bf +1753081 000000000073b0eafb303f1037639ad8caa6dd3425befda6cc10143bdba34076 +1753109 00000000014b95c339096af89bb0d2c70075d87db32be00acd911b4fe67abf63 +1753149 0000000000c376d0687c0def83d6086d501d992080f4579b480c7fd8dfc58aec +1753205 0000000000267937446d63d3454f85fa97ae490e642dfc39bbd312ca2ff9c565 +1753243 0000000001deb41a4ece40e0a0e6b7834012e141c8e5143a704f3923731867b2 +1753285 000000000149493f6824c11e4d5fdf7676f73b6cc201825a350701dd57c0a0d6 +1753319 0000000000d1f15b8071f4832cb39290b8ba9f81a0852b1b292addbc7815f107 +1753356 00000000000a296f32f51702286013f3709da1cba8b8e5ded02e3c6e36039ef5 +1753399 0000000000cc23f5cb41861f5ae80b0fdaac6bad56f123ea18ddbe6dd1f80e2c +1753436 00000000013e7c55c28af1cd7de47f84ac5368002096157d4c5839bca7751a94 +1753467 000000000053a79df7292b692a9aa13f7ffcfb3bcd101725cdb676277dc9f7f0 +1753511 00000000017a0cb382f5952bd16796f6e5c756d300b3138d18ece14969304b44 +1753550 000000000028d8ab8f8f91224c0fd4068d77047415ddac7c813a593c43d72a5f +1753586 0000000000a7372071eda318aa29df291be93e6945edbab63491214909099563 +1753628 00000000000824c5f25012afa9a002755823c2d0a3f21e186e9f64ad8f09f2d2 +1753667 00000000010175313c1866bcbd1805af0984420e3d9ebba031a981c006235067 +1753716 00000000001d542a6df159733f650a8d06345170b4b8da669d2bfd9a374be529 +1753800 00000000002f4816d1664027df72181092798f0b94dc285906b756b4bc221cb0 +1753862 000000000066be9ed4790bfc8b966a36ce4235adfe695c068585ea26b5ab44f2 +1753900 0000000000c8e7961ad49316fe5f2d81ebf1ed2701ae6f0ea8ef1b8bfd638d4f +1753954 00000000013dd47aaa066d5a69d8d2478eb69854a9c3f79a546c9d25c81fc2bd +1754000 000000000145a74bef86157a79e149e0f982453413a028abdb71a2a6f13c0606 +1754038 0000000000ed3eff0b93ef37ff1a122fcdbbe33fdcefa51271cc4c0081572549 +1754086 00000000009dcdb9f98ca7bc6b25459a266b2661228d1bf7b8b2c53a7def3762 +1754129 0000000000ab21636b321d250ee77e0b6468eeeca62f382fbd606adaeeef8421 +1754178 0000000000ef969ebaaff7e4ee2b45035da1ebda36bc0853b382ef182f1d7394 +1754228 000000000030888c9789ae48bf897819a075f6da41e9aecfa49c8bc371640280 +1754265 0000000001d57b8d6700d81cf8a777d07061227c8e46f629309f9f98a2335ef3 +1754314 000000000025679ef0051936ff1c8afcc92e32640eeb9274199f18ef510de32b +1754357 000000000119866d111781b52327b00a2a94b71ee2fd7edfef18483dd1c2bcda +1754390 00000000009f02e1e632eba93e043611deb33f40f6fbb1ea6d684b09288a768c +1754449 000000000063d0aa2c67e7aaac3e9b31ef375926368a865404f13eb4b182f25d +1754488 0000000001625cea0bac30615f385e26b3066015249e6bcc18d793abfcd312de +1754533 0000000000cf8fc5be2ea0bb418692b2f4bf4b790227eba4150872eb7b682dae +1754576 0000000001ba8f58db5fcfebe97e33b7cf306ea36eee59a2c9ffa66f261ec2f8 +1754620 0000000000e47db4be8cd5cc44025c8899bdd45755943d12cecee365e16de823 +1754667 0000000001818b19327e1defa47e6b214e3c580a1099caf94ace556e8db3980a +1754705 0000000001da1e86792d4bbb93ee61aa6cf093d7c749373e978388eb9efd7234 +1754765 00000000005ecfde33846ddf294e3e9e54e2a68013c9ab4ff6a439f5e5c4bdf4 +1754802 0000000001ef86a5d3c12f5222279b28a523b9c0b16e8f483106c8ca7d551e8f +1754851 000000000102002620b57b05fedb976ed92e4740e05ddc1a84424bd66a5c7a33 +1754899 0000000000fb0f3bcd8b98a801beca7f19d6cad2a21c2748dfcb3d11a768b322 +1754960 0000000000d8e6bf50f5b302f87c37a2d3f71f9e8e22147cb1e25f66182f34da +1754997 000000000057fb740775757c712758a79fd869a83426a916a591cde79fb4069e +1755046 0000000001ae8eb3008a820c8b6102b3f312dd3ee58fbf4feb9d1ad9a784028a +1755101 000000000098b3cc545d0f337308c40d719dba743c3f3f384e2235ecf57d6d9b +1755131 0000000001561492e426722098be3d6914e60c90f0a4eed2a958c3f84e8637ad +1755187 
00000000001f77235ef671f042b7ae3bb04e26a77de1d1acec28144c454c001b +1755229 0000000001764b9e5ac5d8d440686b8a2f4e2dee6619154e561eb627420652e0 +1755280 0000000000afef71fa0490102bf98e0573ff6268d50a72057cc9b15be5dd5802 +1755321 0000000000ba88fab7bcc8652ebe46bde686d0d0da5d2769fcac950601426f17 +1755369 00000000000d029df4abe66c5575e498af6d1d2dced025a5cc5e1f36e2a6ce9b +1755422 0000000001110ea0ad0fb93e5daa0b44c835a8bcab1d11702a600955b4da709c +1755471 00000000000551068552a492a24d8040159cea3397bc5e4ee55e4c26bebb9d42 +1755513 0000000000020253c17d35a75957827349678147b2a281c57f08ff10a5ea1b7d +1755564 00000000015bdb6276b8e498e870cab033738cc923c7982b8cecef5a16347cf3 +1755609 000000000084a00df911eb4489aeb8e32caa29d854111abfb96e59ed21f6ef1a +1755651 000000000026d1813c52c19e14bce25b1829bb7355af117af852d0393445a876 +1755703 00000000000aa1839c7741ce92f86082c0e932698abb22300cb398b00a40ce62 +1755758 00000000010d0498d5d3be9b670be158c75a5f3c3cf78adbc1b93c7e1a4094a3 +1755808 000000000175d7bf428e9d5e8227b656c3e8873609e4ea4021750872dffc81e3 +1755849 00000000002c0053eb1b84877c5ef8bc7a69209bd80371f0681a0859e9dcffd0 +1755888 0000000001f9bff067124eef02a99d6bec493c5e22f4aeed843cbe5dfcdd64e9 +1755947 00000000010b4b4650c2a6ac1329137ed0ed42f58e85122e23a6b21f71ad0373 +1755995 00000000014983284b7f0f4be6b832345e513b285e058bacb123eb13ea49053a +1756036 00000000004d71f042083b54cb4719c6e9b156e124b4f95a28c893dabfea93c6 +1756089 0000000000738b950cc9c4c5e1d941c097d8f34981872537a5e891ad3c3c028f +1756133 00000000019855a8235fd366036b7955d6b50e2446290bf7851d19aaac399095 +1756176 000000000194c5a9a27b492e60cb541fcf932a4a7584a57f7b930946674f40a6 +1756239 0000000001540f59e5ca4810c3d13491663df4850570eabef6e53ffa8a81cb11 +1756278 0000000000759d3b5b8f8b234d489d1f228c71d6e405dd187dceaeecb3304b6b +1756331 000000000035caa1fc97674e52f37d7a2307330f2ef422ff433fa5a2a2a085c5 +1756371 0000000000ef562dbb61f2c901d85117819bafdd9b83e727e16dce11db60f0b9 +1756417 00000000006cc1b7e6101f7f68cd5fbcb80c1b4f4cd44cef52350fefcca1c243 +1756471 00000000015b342c6ab31e94e6dc3ae643dfbd50033ae2ec51f5742996763dfe +1756522 0000000000c61f7963b2ac865abf3372006233715c8ef2d6492f719a67b6abaa +1756577 000000000018df9ea418e42cce7a7607fd15c9503a4501da51603aafd2caf30a +1756618 0000000001368d64ab51cf21475f580b18f89b2e332df987c09ce0861ac4efc3 +1756660 00000000005dbd9629a484c2b139adf93a3afe24578a97ef789dc54a91cd35cc +1756719 0000000000959a4e85ae047dceaa3b5e148b5fae3fcb380136526801b7d413c5 +1756757 00000000013eed8290bb46361dccad6b251d5783bf9549930a974e3c254c8ddd +1756805 00000000015b813b1277ff54aca159ac2ffdabba0c03d0aa74ce67a83cec321b +1756847 0000000000d8673f3df2b00980434c11ff92ed29dd9e83c19f28ad8a1e3c719b +1756897 0000000000b3248578dc9aa305314e7eb775f058cc5a155c29775b0e010191a7 +1756941 0000000000c2072c99dc2cac7db1bcd59bd520ba29a4c7aba5ab62a31dbf2f19 +1756988 00000000001482ed08fa9160b3cd2aa7895c05f4bf55f2f48048572a5c9456c3 +1757038 0000000000cc6fa6056c0c812ee4a232415ab01bf5e64eb7bac23feea0315602 +1757087 0000000000e5fee947f6e7f900392eb62cdc1084076f948963227e6fcf015118 +1757132 0000000000ccf5be278da0bfb6edfd8d842297e9f85370f2c04244f93b90ade0 +1757166 0000000000434fd11d2b48e38814948d7f4af42d95d73c2437997454f12094a2 +1757215 000000000059ef453c99f78245080de37521a886545cc5220826bcbf89491413 +1757266 0000000000911d6c6c11cc864e9fd7134339906b59625f6f90dc7805577c436d +1757311 0000000000650554b2370f6e2506782c1d003b87cc11cc5b3c03eb486d656355 +1757358 00000000001fa1500df48aad64b05539b682a000dec9c24c563eb666d741fa13 +1757406 00000000012c9747f42046d6e03218d265296fafbe3a11947f10efc05cb9d00e +1757453 
00000000014168f0dd05d7e7e24ce6f402d4022a51e0770f2009d0b839708d1c +1757505 0000000000515702bca6bdc0a930413cb800344310693cc450f9b87e9ba5548e +1757554 0000000001096b4fbbc6e987f79b3bc636ec61c8ca5343f2f9408aa12b729f7d +1757606 000000000109ea6d996942b1c9fbc52c33ca821d105e5bb6617e3c4c57c99ddb +1757655 000000000113831c1b6b63f5940f35292f260a37046cf6efea9b47ed94960ca2 +1757700 0000000000950a019f15c4eaf38d3e7e943b72ab8fdbf923971cc8d903ba30a3 +1757753 0000000000c7976c6c04cb845c6bd1578848555d0db6ec0be3fddd783ee9701b +1757797 00000000011459b74074bb8aebae3b5a01e215a98822943468435da42f5a0389 +1757847 00000000012e7488949834866f2c16466586203a4044a2d33dbddbbb33fe7a14 +1757900 000000000036581ee479cbb2e6ed18378ba9b71f85f50b1cdcaf560da0584cf7 +1757938 00000000010be4c180d889abc2ef464b14b8565ed5c6b5016a5f65eeca03766c +1757990 0000000000f1cc9157d3d70643daf5487d22166cc9bd0bc08a90ca39e06886fa +1758037 0000000000061298f9fbba594c73e33d718d0696f39ba3ae0e4852803e77b665 +1758083 000000000102deb534ba2ba8b007deed4d6a0f693b93a97f1532ceff09393e71 +1758134 0000000001452eb26e442c7e878b3b47a117ecc9a7b64a9670eee2e65216b11b +1758201 0000000000b54173d345344cbeb747423c6f748afdb6e95a221a88feac0466e3 +1758274 00000000014878326add26fee7eb313d311338632ce405232e8d4ece0153edff +1758358 0000000000c316d8610c62c646892929798d5457ab8f3d5c56c98b1fbd99cf14 +1758429 0000000001339c2095077fc83d3f73f878db7f2db2648a6f0f04381ca5798e44 +1758521 000000000181c82b95e8290d0846956815c4f51e0e5f463a4da20e7387ea686d +1758593 00000000005f4c83c6f129d4afb6d04f97fb4a12f589cc32900f83f9ab24466c +1758646 0000000000cc37ebbc01b7de0d16ce8bdc32a0816fb7937d6eb8ed49613a87f6 +1758713 00000000006528cb78f52faa0f6ddde960b4375ca0d7ba98eb88060801c2f982 +1758751 0000000000ed62c7fbb94fa4bb86a4ea9e4f7d65a729887a3944f79482a9ae83 +1758805 000000000181e21dbab5f145fa700857ad7fca38bf520ea18ccada82958f6b10 +1758846 000000000063400b5c2bc37854a81fcc25c6c0362fa0ed642392f55e9a9e154f +1758887 00000000019f08cee14dfa9fca2813a72a3c69c83469f4969fa2a4a1f2d4889e +1758929 0000000000913bc865548d9046fdced46f141ec2870b80bda6378c46c0a286c7 +1758963 0000000001342ede25a3c50083354fd5561792d1a75a143a91d591cca314e70e +1759009 0000000000da0c9d7be3ad46f1bc274dab865c5a6f80c8148d506c3d8a321853 +1759062 000000000118cd8f98b810214aa0a478bfb554c11604f5a8305f8bd97ed1d896 +1759094 0000000000909a8274bacbc0119854ba05df5b3ae3d16a54e843ba6f3adcfb3d +1759138 000000000158d1aa3b279e92d9b3a81c33f6d12d11a56ebfed4c4682cc6e56e4 +1759175 0000000001670ada677912c6ce0de3793ebaa37590478f15e8720c46a9bd86eb +1759219 000000000004eb8ef371cba3482e2c32ab1ae568139fe9b6a829e432fddccac4 +1759266 00000000009cf128a7a24d39e8a18a9462816338f3aabf94cbf51ecf14fc23cd +1759305 0000000001672f5c17b7a5cf878572aa83a2f8b8f88e6aa6938fbc5fd6bba76f +1759341 0000000001d713a26f9ad19a3c38e6f32037c0fccce8cc9b3e22b34e5fef77da +1759378 000000000040f5eaf670e62b92980ea2738f0faa043651abd00039443f772362 +1759425 0000000000cf82775547a984cdbe3e811750c8283b55bde281e44a0060b883dc +1759460 0000000001109535d7ca3680f0f0dfb312f926b5822d1b388026b7ac75a1a9ea +1759497 000000000118358834ab0c0a45eb4bf0fe43e47e9f103dd68edbcc9a89e99989 +1759543 00000000008e1698a5b731b100b09811a69c19fe8c4cc02dc2bccbfc873e1937 +1759577 0000000000d8565d9d05110d8456f556abeada5111c7b794897b3d0f94f3747c +1759622 0000000000e85a1e391177f7c6818d2908ac24842e06da03a573a53b5cbf943b +1759668 000000000170da14a0d30d0cf1cfb9c187d65100305635334a0ef364a94252dd +1759712 0000000000ca80b49b3c516047cc529699c06bedb6dcbd0b50b3f688910b6e94 +1759750 000000000125a23632b59f32bef3bb43930c50f5fa6eaa586ba6dba078bb1e94 +1759791 
00000000002c48e03dfd5f527859edc1c01f611503199e13c90f4bc981e73a24 +1759824 00000000001fb150aed2853637e69fb57ea5d36556e35b6750119c61c6bf24c9 +1759870 0000000000b3bc4ad9d1c73c1536267a98bc31437a364feb0360d1abcdc4956d +1759908 000000000127b277eb81b263b9c863bc9089fd04662846b949702ed193fee106 +1759950 0000000000044315ee02f218f8d470a4d645d5f380e64409247df586fa53d297 +1759995 00000000001e82f4f6452809d9121dcbf85fdad457362a8d34565a7c61d8dc83 +1760047 000000000032ed1bb0643c0bce22ad06f5b7040ff9d14cc9584ae04f4856394d +1760082 0000000001012bc01d3fd38c13781c91213dac1e57c9d5195210af7b8c458335 +1760127 0000000000cffeedf52e8926dbb8d71e9194021c4da7fc2c68c1be1e98115cd7 +1760168 00000000003b64f9e8f8c5c9c202cc4a00f45bcb4f4557a228350fc128fc8757 +1760210 00000000010b8617712bbe2c35d5e49b4742230d900a4de33b4d54db6aeb474d +1760254 00000000013966980e6ccb5916199acfe9af71a411c7e7eaeb38f853208c15a3 +1760298 0000000001650fdbe6625e435995435e6dafada733a0c0aff7e966b1ae180f64 +1760341 00000000015ed06a708e776dd892e14d44407724ea65009c9a234497ea7479c7 +1760384 000000000143700a2fb16133ca52cba2dddb37bf75ffedac728fae562b6f7253 +1760435 000000000088e5a3587cc5653549956a883b0d0e951c3de97a9c1e76d4fa8385 +1760471 0000000000cf5acd3b151a8751a50cc9695e455d6077fab3bfb17ab97dcff21b +1760511 00000000017e3d0d9859247799cc6cfec48d79d419f515a34595aa9df5d29193 +1760546 0000000001e975111697f423abad3b62e5eac9dcfa62e81fa8b8f1302338778d +1760583 0000000000d41f9a879678cf7be1cf4df077fa3882f9ab8475fd8369d8765c18 +1760635 0000000001c4cce59ebcd5178ce75795c2b98af803ee2a9bcaddde95a645ebfd +1760681 0000000000d3f6090b03905dc7715e4226e8bf78e7ca17463b9eaca4591a3819 +1760723 00000000005cfbfaa82a6a45a3a9ba2a50bb7962e04453ff185247f038097925 +1760768 00000000015da37b9f644d31e42750602c3dcc359f34b1da4581c10055702ee5 +1760803 000000000180ca09f4f108c7d5190c56f820a8f04845fd1f121d4fdc2032b779 +1760860 0000000000202ccd52f1a55d4687825a8a666e948ce7b3115545f18922b07559 +1760896 0000000000f4ecee96ca72b140396bca4620578e52aa659e0c47630f827c37fc +1760941 00000000017b09e13396bd762556c15bdf1528b59aeb7930eafecc59281ec457 +1760985 000000000156aaf2062f68f340235ae8d0b18533e09695692a501f8cf40ff6b6 +1761024 000000000027739db713d7259fa0a4b738e22fd65c349b51b1b6dc2879a9117f +1761065 000000000146fd568a1cbae5f934e6e8f6efcf0f69871f1e93e80fc951fe52f3 +1761105 0000000000ef6f75b77b53264e6aaf43e64613c288b97a4feb4b69551acbb1bc +1761151 00000000009946fbfc0cffb47ab50c7f17aa9fda7d804cfefe2062f1e74e1a59 +1761194 000000000177177903ffb313194c2f495e89c9b1356682471a18941b18ffa5c5 +1761247 0000000000cbadfe2e80abf6920bb4932dba89d3ddc99a6323bfe1e9bfc01317 +1761280 0000000001156e6698b0bdcd35aa9deb9ae2ff4581b1f088b84630f7bc05b99b +1761324 0000000000da1b0b514c7df0cfef3f2781e12d7c93210fd4343746a3ecde083e +1761377 0000000000df0fc9f2e4aa5f498799c9fe66fe99ac9f099ace2a2f3719c8a3d2 +1761421 00000000000ec38f0e4dedcb16d88c9a02eb8cb0e18e389bd4c138685021007b +1761486 00000000011acf47b69b168bb9b16d32b75a023d950924bad359b842063cd2f4 +1761543 00000000004ef00abfa863d52302919856d30129439d9907bcb27bbe47ae7b5a +1761601 0000000000173c2f8bd5a7a8a1330189dd484118d16cc2947a1e418c471bfe7d +1761660 00000000016aa3cc4b4027656015b2d396d5096c8eced4bd12addffa5d49a07e +1761714 00000000013d11bf08de301ff61daca078a2708f042e4e085d6944270b40306a +1761765 000000000047ca5af2ec9c05f3c9b65340401090ffcfe5d693eaa3fcf426b9b1 +1761825 00000000003fb6b3fe5883a98fba11fd8758ac0dc6af7883301f74084366a029 +1761909 00000000018943d9358ff1b8c1e082e80ff6b7f4be8d8a17ab77ad3a14851147 +1761960 0000000000554d1e50cf462cbe1bbbfe2c1dbb20f20782d6f823ba2f612cae22 +1762035 
000000000124b1948645efc247d993ccd93324c733b7e60be909c58018fdec85 +1762118 0000000000c2a02779afd87fa8cd38335b759d5d59f76c542d04994a1b360c77 +1762193 0000000000797da849d4f94cc5439f817e8d69178fce402b864ae69dfb7c3a23 +1762267 00000000015effd3eedc68a30f8fc6487ed96c5d298645bdbcb12bedefbaeb76 +1762324 000000000174594b3f8513c046950d53d5f61f5305ef2fef30464ed1c2f5cddd +1762380 000000000111ff3e939912e4b6f56efc23378418ab5ad2d62fb3b424193e54e9 +1762450 00000000016c736b67cdc5ade5373843168d88b4c6114b5369f4a28f619ed230 +1762489 0000000000921ada115b17660efd9dccbd63ad1c7c9dfabf8d38f36c721280cc +1762521 00000000014dd8c1647db9554a2a164ec7e819fa59a5fa52d773bad93ea0555e +1762559 0000000000df2d4c07691f261dc777b2b596e0b49e625d150a26c4dbafce6905 +1762607 0000000000e3ad3dcd3a445885822258f519c8d78c19af5b8fd363cf5c8df9e9 +1762633 0000000000fee048c565e6b5e926ea29505d143a7bc9902be4e8d8a18696aeb2 +1762673 0000000000b788cbc30a899bc7f557de607611cbe44046d619a3d58dcbcbc54b +1762756 0000000000be0d6547f51eab0bf89abc5fbcb5f2774e2e83a3868f385586bb3f +1762801 0000000000746d4300840f77e2c476a91b1598673f6eb526171fba5e7e45b4a4 +1762841 000000000065de10bd94ea122eeb1f27007f498b5c4142c033dcf453455a28a3 +1762880 00000000014661378316f5ca012420566915bfb387468321f7de24bec051be11 +1762914 000000000150264feff9055a3cb1cb0f04e605104965343620a1ce0c58a7072e +1762961 000000000136c9cf2e0eda73b574c935a85fa301aeb3b49220b655b038bcd445 +1762992 00000000003cb5fb5decd989eb5256e8c28837c26856cfe93c7ab712eb11631d +1763023 00000000011c25ec0085759381ca9c52286a1426ca00b75bdaecace7ef282c2a +1763064 00000000004738322221aa1325150a49a0ade8ecbb67d3d3156c95f7eb5051b8 +1763102 00000000003d408a024500c70365b8eb2f037a8355098237261d3658f3731381 +1763136 00000000000a0ff7504da909ffe65d9e90322ec15c37cefea6adcea8e97ea53f +1763177 00000000014abd71b3b11a9c77d46740cc3262df1b531fc96536eac771d5a622 +1763218 0000000000a3f95ee5eed080aaf0f8ad6665d7ebe3f3fe81416b4d6382f88a45 +1763250 000000000009577d7bafbef7e04403e72be8a2b3a41d45045697938597794ffb +1763272 00000000006a552e94bc45b4bc59e2d0de5aa3fa5f0ddeac18cced1233e01553 +1763326 0000000001344173b55232364753e905fc89065e6321e8bda46299e368542d6b +1763358 0000000000f9a0983116c769a00241de4942b755f66ede04174d690ae06cd0d2 +1763387 0000000000291cec827c8e59e69532de711ee49c742f67b480decf73f52f1cdd +1763431 0000000000569f5d78c2079a0936cb01bd0ad6766db5e1564fc14bfc14fed6d6 +1763467 000000000094e65579e0a01629d90e4ab449c2d49c933d19aaa8c5abc945250b +1763508 00000000008524b1105d8747aeb767d5d9c95c799e4a7b63dd44d7c5d1873bf7 +1763538 0000000001828a8600f3ab9b161769716d6374f4af1ec532870628704868c74c +1763578 0000000000777f096ea1a38caef61c2b19f5bdc5b9c0b507a00c2d9e57dfb784 +1763614 000000000077c4be424a8873fff664e27f438af5d738c4086647dc303ef965a4 +1763653 000000000084f34ce68d2ca4c803d45c7b37b43b3081d57e555376efb9565a48 +1763689 00000000000671f84b2cb717835bf815e89064d1eceb066347ec996f9f9452cf +1763728 000000000065a266aeb87d496018864e70659e250a1c9534ac1c6cefb1d7b265 +1763765 000000000090cad864a2793cfe051994c031f69cbf17d8304ee218b73bdcd6af +1763804 00000000005e837b52daab71da02822bac963b0603ab0fb6de00095e952ceab4 +1763839 00000000002f55b1bde560e4ee9139fd825adc6f506e59364ec449113a8c9191 +1763872 00000000010572de35f29f246f5b341d161c836063f37596e3f7805593a068ba +1763906 0000000001f58b1e877e72d3e0d59fac5c11583f5fe788d3d6ef934a7f54b3a3 +1763944 0000000000917999f512f4609b084edc6f53643f8a189255af05ec5ef2ed7dee +1763978 0000000000c69d4eca67ac89816767172aee9d566aa6d63dffb630ebfc6cc611 +1764026 0000000000cd77dad5e7bf78447e6568c276c347b6eab48a57f85f1e64a00236 +1764057 
00000000016afcbc177d37c8f9d6871cd550a34f17d55942dea2fdf71a20dd1d +1764096 0000000000f4445cef08ed8d67b65915b65ac137c52fb0ead2cc905c9b8306ee +1764129 0000000001c0e957ef5ca86c87defa416590d651320a1454a37e6e95a0046dc7 +1764165 0000000000f8beca88f06a3bc380fb6ac491226ffe9a1b1b7e080a651153027f +1764207 000000000046bd0ee83efffb3ac4866526103001a22dc8bffea064f56e155478 +1764241 000000000156615c103707d3025f7c8fb043fa8ffe5e41d495c91f7b855741b5 +1764283 0000000000963d096c547e7e6d45cbeb2b7b1a97982d1379df6803c914bd6367 +1764320 0000000001224d912b9ba32b6d0b1aa9b39f735ee8eab4f9aa29312f8f47a36b +1764350 0000000000921da1d3be00972168835fd701012d7468d195a2a26f61ea5c47f5 +1764394 000000000091a95cf62b35502a69c8aed7b250cdc2e45cd9adf3cbfecb54dcda +1764429 0000000001ac26aff06b72f924ff05fbca04875275e9d03f99bcdff66ca2ea22 +1764467 0000000000543c72dd1c97564ddeff12a73110df7518889020d6ebead3ff1fea +1764500 0000000000d6c8da8ae15278cdb6f402652a0757ec4b115cf305f06ac4dd01f9 +1764546 00000000013010e125455e9e9bcf1b1b8231fc82e73b931ed8486c59a46dd474 +1764571 00000000009265ed1cb9e476368e94156e3efe008a06fed21602cb8d4936d37a +1764610 0000000000786f70a7256b0cfdcb682d616a28435836eb9750b9d78a11f81bbe +1764645 0000000000a11026650fd955730af97ca8fbfc1e0ae1f7ff94bb0a1d8844a5ac +1764686 0000000001103da711b2eb7459d3134f0e79e0f01e3a9daca4ba5d98172d35a4 +1764728 00000000004e30c0eb727e2ed3698abe67adc9d98c692f9fd5fe0c254921482e +1764766 000000000165c8fc48b809b020a50e6cad7e2e2e665d2fef24bcb8d69d5a0849 +1764805 000000000167d561491ab02c51a3e37f67fc64243779b7b6f17666d2830f2eef +1764846 0000000000317271d177c6d704c6e8018dfa573a33b4b1261661e7af6321d39d +1764880 00000000005bcff1f9f2159cf994d82e244b985d74caca9b230742f2d6528616 +1764908 00000000014a7b82dc2da2e4b392f2259befeb1d0c955e6e576269cf9c92fbb3 +1764946 0000000000f7aa70be1e36c8657e8e591b5e0f911803341dd670a399012d7952 +1764986 000000000100ea7e640413eb9c38d730686c359566d2635286d54588b9b5a771 +1765022 0000000000766ff7c6af69b1e72b7481e78df0732a41f7d26e09a7e26a4696ee +1765063 00000000008510ea35bd89490fe39611f2232f9a69fab3aff7b77abf5223baf8 +1765094 00000000015778b3fae2d58b6d9831f87edb2fd8f565f8674435818124488cca +1765136 0000000001701c88da88c7b4ba2a78bcc40cef9842eb84fdd73263f00eba3061 +1765163 0000000001011cc8a1c3170166bd78351efd4122a05831c3344fbf207a49ad85 +1765207 0000000000f7b852bf6c06049cc0a7a6d58facaf28a5627da1c346e3ce84462e +1765244 0000000001572b3380f6df71bfa58e8b7b85148c3bfb040551e219a18ae8700d +1765278 00000000011261ab0e875fc8cef2eb88ba3e5da8797829e1679f894a990d2ea1 +1765329 0000000000e30ea4447527c70ff21dcbebe6a702836b7b178fa35f5f69414458 +1765362 00000000002118d457668e8b6b35555fa67bd176bbd5407ffda5f7cff5118a9b +1765397 00000000005a679e3a909b65c94a8da5ee231f82d7bc5355071d0049b9ccb93f +1765437 0000000000d79bca6f988995f763bbef728a028ec28336137e38a545428da36f +1765474 00000000019ad7c8ba64e68443ee58e6140c9779892f3e7d691978c0e4521b24 +1765517 00000000008d26d183581f939e2296097ae5f88d9cc338684d292793f95321b2 +1765549 0000000001b22914a80b593bd5e526fd0baa5d2e205cdbc54a6a7677571d2738 +1765592 000000000100fb5026c4d6485a85a47973720bb4e499e9f6098fd08fc806516b +1765628 0000000000ab0b38ed9b8f9d7c0f3a6e242f6d75727b9c2b78d8ece82520aad5 +1765666 0000000000377e1914bc574c50d31c7c2f1c41c6ddd471bf43ede381b40f5ee8 +1765704 0000000000d4235e793cf8d0a60f506e43799d68ffb584e8de8b547308ec7f2d +1765742 000000000026c392fbcbb7105a17e23201676435d13060311546c1ed3013ac10 +1765778 000000000111f15fee58aafd09b546a15dad04c6739660f849cdb453730bc022 +1765808 000000000075f9880cb02c89e699dfb4594563d3da5e9696c3f13427f986b8e1 +1765846 
00000000005149469e483831284e208ed135c33262cea2099ce59aa50dc11ee5 +1765883 0000000000772537f61b4b06400633c08dc9bf36e97ca5a341721668923b7aa7 +1765918 00000000014ae0083d30554f9f515cf8950845b6a42022ca5555fe6d6253f70a +1765958 00000000008e00aacc3fc1dbf12dad186a49b0038a68fa44e536484e4a8366b9 +1765990 00000000018b1d54f94788b26d0ff43fd2fbbd66edbe968d60f6ced05a4a1f6d +1766021 000000000148c5600633dd883dce66af827bda8762b4897ab4a627f05db90e0d +1766063 0000000000c2fca980169067835bf3bd8db563e26d6d16151643b99d48615279 +1766092 00000000009faff1ba06c12d12f905faccde22e6cb37a3485e6743860a5ac224 +1766133 0000000000a842f3703de2cafc685b8686cd06d7e718a2814c02b8aad529ce27 +1766172 000000000017a2d883184b8846788f740278e243d2419fce5fb010d6e31b5e88 +1766211 0000000000cc70529a9ba85e23557f4363840323633f7dbe46d74a118e891620 +1766253 00000000011021a3c9333a693d2aa3f3bc2158d50de2ff318821781275abb493 +1766278 00000000017eb236a6eae9be948c5787b9d6200bbf4729eae0f29daccb4ae7ae +1766316 0000000000233f8e8cff9a27fda81b156727436472f5dffa4706315b11ee0452 +1766346 00000000016a159fde633e06c4be19a8934011dff594a31e47ef7b72cd8c9af1 +1766416 0000000000d25919d61b74abc7b6c701a2bf04e62cebd0dd8c1016b844172474 +1766470 000000000119a1f69c25c341d9a564571b85e9078ccf5e2cb5b8559b50a30f1c +1766517 0000000000573f9ba27189e0a123342a7d3de864e65f3bdcf650c0cb8111d0ed +1766550 00000000012cbe20668c8f90f65b8420b62336d604e091ca90bd353927f0b188 +1766590 000000000178a7deb917de7d5479265903e541a86a3e631fd7d29c34c188c4a7 +1766628 00000000016f8463f4418354433930e70e56bc89ef9499712392949d5a7e58a9 +1766666 00000000018db19374ee2d04a687810c8a28651ceb5990f92a0fc6d710417d13 +1766701 000000000148f189efd563ce82641d746affb6ca5ea64b3a0b9564a1e0cbe074 +1766737 00000000014f6e504c5f58d7bf83e5e89a449e81107074b1654761e71040a1c0 +1766771 0000000000c04c7510477bfebd8fa6bf9ba1b7cd59283c0db52f9517e5f8588c +1766814 0000000001599a0f9afca629ba95a3f86ff868f17b20a86bd4f51bff82f0cec9 +1766844 00000000018d43eb3ac7ba2a992be8a8567eed81900c529f9216d3b20fb7b545 +1766880 000000000069a92a41ea0469ce1a413587633f4f4502ba56d076c98d5aaed692 +1766912 00000000012199415577c1ca4ded52d218ff219df0fad6f6995bb0d9f3b5133a +1766951 0000000000ad588ad064e02bbcf605dea1fb5f71615a219ba21364ad333ee216 +1766991 000000000037c623fe859224f934178fccfc542990d10798e339484537438705 +1767018 0000000000d26c94728ab4754ec6f5e1354eb1668120011d0ad8402dfe02e042 +1767052 00000000012d4cd70187c53367e5cd3cdef8173337e017a7cb02bb6813172ed3 +1767085 0000000000f6a95dc55bb8113eb0077f13d841d39a1d3bcaab7bba00a0486cbe +1767123 00000000015bedc50e1b02eed2fd598373c9a7685b8342805e61dffaf51726b4 +1767159 0000000000c885a54e0eca3c3cfc4bc73571b06d68395f2b2ce1bccdf4271c5a +1767193 0000000002216b25daac2ad2a0074b7bb4ff8f1c741ebbbadf1e6855d2c3876c +1767227 00000000015c77c4bb32ec3bcb996657c967343c9042a8bf40ffe586a111bed8 +1767262 0000000001b3a65016e59a3acbd2e329f2b804e9dc0ed6bc77df5db4decc3907 +1767299 00000000011137838df56221680c30844ff18a1e69616541d12cc30115303374 +1767334 00000000005aa3375289ec6b9302313f43640fa58a583ddedcd66364f89b212a +1767370 00000000010bca110b22225c9266929d6549bab720ae66aa3cf1223d03c288a1 +1767410 00000000006f0e18d267e01a00895b24ded4a4d41cb5d755358128aeb24820d9 +1767449 00000000015bf62147563bc9c725beafe075ab4698c804042f9014cd6f74d6af +1767482 0000000000cc5cf83c99044c62a57c0e366b1cd7d7f7185abbfed49eedd6f4ff +1767516 0000000001a6cc6d043773611249d5a21a303c5b6982dea6c52454dfee98e66a +1767554 0000000001442f977254563ce8abe22147534e42a34bf85e6e9d634d597f9a0c +1767599 000000000029b86c12da57b66ed72c54923fe2b833a9ade17ce9adcdd73f99a4 +1767629 
00000000018a7b93c998a89fcef2d802e8f33d5e22b711599fd4498dffd870d2 +1767667 000000000141a52d595692bec37b834e6e990098c393b599883d5f45340197a9 +1767700 0000000000958a37f93160b10fd971a813c24c4281faa8b85f00fc373a035987 +1767757 0000000001186ecb2303b63d02d212f2dda9dee8901337886ee99bd10f86de11 +1767817 000000000163c1954f0ba82381ae67a572aab84ba13d0404ed37e46cde85ac9d +1767875 0000000000643f46227832513f6c36b6c058c1b5c3719e2819fb9143928808c6 +1767926 0000000000a4c3e17cb6e70ef53b43ced73f0031cc0d521587c80f0f5d894b9c +1767979 00000000015a50728ad309af9c0e02a289fea1127d58495067df58e9dce2bd75 +1768022 00000000009d19350aad093fd5c7dab7b42dcc31ac1caf3fadb9641ead934054 +1768059 000000000099d9957bd4b0c8211c8efcd351de967323d63879aeb93f9b37b5a9 +1768089 00000000017ec0bc51a341bc5543813b2fbf00a21ca0a5355d5cd5db92f4a4a2 +1768123 00000000019d2a64fd94a426df941fefda2cb9b209ee589dbf107a3b16a811be +1768154 0000000000d7e3e463650ef0362e4febcc8befada6aec2d26615f8e8c5104a95 +1768190 0000000001bf53d8bf0d824ead931df7353a775640e6d2b8b4b3aaa9fed5bd76 +1768213 0000000000a6926014efe0ac8a0ee8ec6e7abdeb2727b2e35657c9dde46ac5db +1768248 00000000002fa5a7a24e9d2bbe30879514f10fb758e9440cff655d3bdec64834 +1768280 0000000001588f9ddfa756c16bde34321b87c2c222a7c41a5f3fc93e9b8ddc2c +1768306 000000000112ec3d8df4729051da18acbd1cab8277d9564fa988d6a34e62f688 +1768339 00000000017b523466cbf7efc208dfd7442a9967eb581da15b814d6fcdbd3def +1768368 0000000000e9a78094278cc57de2f812d8fb05dc98a2ff0e6cf2a861659d597d +1768395 0000000001f28555458218d734cb5a2f40f0596676e8e14a67b6c0a0967f8a3e +1768426 00000000011b29f1f4d1b208c127e91aad5ed11a01b25718e8d556877b85efcf +1768474 000000000044bf1bcb391ec996285d223d3662495e2a068c9f64ce5294711db2 +1768514 0000000001718637f334a1a0549b20c8541abb4aade997a20e3caf30d870c97a +1768557 00000000014472dbdf1043803236f979857f883f1c41129ccc664af8962a80bd +1768595 0000000001057601571f5ff2c8e34b394b124e4c6447ce75a5a8beb025200ffa +1768651 000000000095e0c8ea33b17b02a2328c85b4a6c09223861d94817bc0431f06ad +1768685 0000000000b4535938831699bc19266c46d61138515061edae3807424e23d76d +1768729 00000000010722f1c059b0df725305f6125ee399d5ea5b366c611323ed666046 +1768760 000000000093f1f76d24d2b1c443b45c5823a5ec2f3fc649f6241a3a9db70d8a +1768811 0000000000b4d39c8fa4f23ec9021dd33f3ec54642432855085e4637394cb69c +1768849 000000000018c178c288e337660cca0ff941a8f127eb31f3b508b95c684d0edc +1768888 00000000005c0d94a0112149c05570bab4e11431a7cd771d60afac451a6e2f60 +1768922 00000000014ee6fa597efd44630dc7d646aad17578df132d71fea076efc7ac22 +1768949 0000000000be483cdd91703776d113e0e67d45ea6e9cd4c65b3ffb304d08dea3 +1768983 00000000013eb4fd0cdb8fa491c1903ad3f020ae974f85b274e27a325fae7e17 +1769008 0000000000351dd68807d00fd127c8338ed5573a0d0310d5a59fc354c9203647 +1769035 0000000000faa5235dff97e08803259fcdf0f46c306d6548373675ce77ab6853 +1769070 00000000011a6f9c23a915e80e3ecc0d73a7414573b4d18699e9a363d1fa216b +1769095 00000000018c09970a45e5e7b4dd585483f2d054a87b80fe3b156d1a9618191b +1769127 0000000001911ff6ba0ee1bc409b50cfe39f8a5e5834718b1dfedc4a761a88b6 +1769156 00000000005350b433639936247aab9780e2a532028a40d6d814f6be8de0a608 +1769195 000000000066f3caac98ce9df16f3da04afe4f31616f9c5ce20ca307d7785a7f +1769220 0000000001710de92befccdee6ceae5fc1bb57eb6626e340bc509d9517a48396 +1769250 00000000006d550395e91f4a1c07108703a28d1a9a2cfaf3538fd7a6418f4d69 +1769273 000000000182b6062dc593e3eda0c64ee385b653088d34c5a00a54a68fc23cde +1769294 0000000001cb36c5ca0c5a99fc4c3a7aa086b9bd34aec60b12d4ec850e189d2f +1769330 00000000011b2831f62f48068db3c95443708f176849fb5a17677c6c0199da9f +1769355 
0000000000f68701d669e2e8775b30479c1738b7fc1eb7e27ad8f2f655d257a4 +1769386 0000000000e75c0f5896a24d81c653b6a7d2733ebd8f053c4d5fe1062efca67c +1769418 0000000000da61b170cf4bdd94e56875bcd1c945da7ec0c9accb5b5a537498c6 +1769450 000000000047a118884351b30249275ce11e541910b12278968025c1bb9e1fc7 +1769478 00000000017bfe8903a43a591a91be98edc8052cb2f8f00ee433fea289d90c76 +1769508 00000000004b6af2c96951756a9d7c25e01a6acf8a078325b73e0b659969a58d +1769535 0000000001ac02b8b8e460247ec9224535c0fd61156f06393a3f24322c391335 +1769565 00000000019bd7107c4698b87205c7b0f18548267d8bd6f779dce7969d0caed2 +1769672 0000000000a247af09054d35730e5d047da24166ca3415c864d557250a1263ba +1769831 000000000052978fbd6b97feb523e9ab60c1a2d1de480df65a2ee09c79d0d74d +1769858 0000000000493a34435b55685890cd13611b948dbd8945ccda4e93790d6c64b5 +1769881 0000000000b5f319fb7f400de25c982a95d2ceb1fdf3bcf797770c7ec1c96721 +1769922 000000000180be91c786d39014a6512982fc0f6c1f303d1a718743e6cdcebad5 +1769952 00000000011e7d27c5c2d0a17807c1de9e926f0a56a5f720e2470dfec87f51fa +1769979 00000000009d7b9f9d1f7a4c94d1423f81c2118ca895850cb1c7f06734644e66 +1770011 0000000000dded52828bff85ac967f70889c73c364a1fe77752ad0d676728b49 +1770038 0000000000ca47080c75ea4f5fd1c263d7377b09683d34bc1e18e96fc9860367 +1770069 0000000000ac85538f242b53ea68687212d01355475312ecd63c53b1468f47a8 +1770095 00000000005dfadc145a1f3fd6782c13185df63254966293bb47fc49dd34ede5 +1770125 00000000000a29ef156adec5480472c2610fb0fe62b6423a0ee6b261e46c21f7 +1770155 0000000000c58f5a67922d2e5fc31c43c9e5c1434695b04777ec345a5ae56147 +1770182 0000000001733a461c3a36c6dd3a00f0c01689987291152da5282f605913b648 +1770212 0000000000b68d7ca324111b87c07bf62bd44efb6727a52fcc358b4c7c6bc3b3 +1770242 0000000000af6308f146d4771180ac98e72732681e2b7b4c4ef694f9b07d9e61 +1770274 000000000051b4eaefc0e9d1201435cfd4e7c4acfa15aae95362c9489133b2c6 +1770310 00000000006fc9eb2ffdb5aa41752a379e0c72d4071672430c62a4e5afc86083 +1770339 0000000000f30a0633759ec23f6fe6e81dcc5a1bd1593a760d7d708da6247957 +1770367 00000000008b314f9b23efc89a0c24952fc0c3c777ff14e8233f028336a1400c +1770389 000000000077a42aa9191dc5e59b8833ef2f068073f28a787519fbc9093596aa +1770419 000000000061939ab16d1728b6b8820051adfd468a9776d4f8016edfc04c50a5 +1770448 000000000121c4a1798d38d82fa265402a1aca0a548f3721ba8ee8a5d6a94774 +1770476 00000000013c2ac0cfbabdfdcc0dae4f0d33d25db2443530c5504af2bbe8d7c3 +1770504 0000000001bea71ee16254da2ec7801aea192ffaeaf5952a0e3cc1cbc2a0163d +1770535 00000000003c4a6ee41c291607773ebfe1b9088348dc95442bae91691f10223d +1770569 00000000005e2c8077c05cdbff830db2fa1af4aaead4996fc93f0e9c86fa5aa6 +1770596 00000000012b27eaa19fc17dd4b6dbfc8ead4cffa31c5841fbb53884d85c106b +1770621 000000000194972b39283307bafad7e40e04617245567501d17b7eee77797dce +1770653 0000000000be84c8a4c66c9708b5cfa5d4ad733ba267198a483203ae79b37cf3 +1770678 00000000012ab6dbc23b1a3aa5256f6b34c0854d7c7bc0b4c2b18297df27629d +1770711 000000000118426da418c01338b28f072d831801d44b789951108c30805114c3 +1770739 00000000011f9474fd28dae53778ea1f015bd8be9295edf77cc5c6efe502e558 +1770770 00000000007622bd53114cb231d03b66b83993fddb8a7b0b5158e93f7bd4dd6a +1770800 0000000000b74a2131329b8badc65dfdaf2e29bd7353786b6b023db1e142aac8 +1770836 00000000015fc512928628b41f526f2b52d49083e7a392b5fbaa5c8ef8da7059 +1770869 000000000144054b375c6f77758d5e17a2cc3870baff19fac756c6b1b82343b2 +1770896 00000000016a00caf4cecbae1c926535569b713aa6a839660f55ee767e21f142 +1770935 0000000000e7ed2022dc38b0d3352c5ddcb05004141bf24750437cb71383ece1 +1770970 0000000001174c127a36db7b254384a76a323268a018686fcd79f6887b653479 +1771001 
0000000000aa2735fe0edca8c6e6e6eec3444098aa8dca5cce45b75e94976997 +1771037 00000000012851211d04302f60c3f785f6e18a332c0e90cf790fbd5858368a2b +1771072 000000000058af5c4788fb8322846a5e4c08dd880446a22e4ae9f1fd1b97a2db +1771108 00000000002a9576e897993652ffcdf10372f48291e8daf3a20c86e536330965 +1771144 00000000004d1b10e006307be254c70abedf9b7b72d6e9c8b930cf3353b78ca6 +1771179 0000000000aa475334f8fe5851cf1d8b488ebbe77a9926cf6b083b448744cdcc +1771207 00000000008f34cce5cef8f191a60c15919b1769be7a82341b11f97659aa9029 +1771237 00000000006195c11375543633cde5956a6f0138bd71e2fa19867ad73c017df9 +1771267 00000000009e43aa74136c470dcf1ba946b171dd4ccae493e21cd50da3c9fcc3 +1771299 0000000001af748864cadf233a3d27473104e103229a454328848f6004210285 +1771328 00000000017ba4059d10364652f7eb37ea3d6b725f8db62e048dd2d1721a9c4b +1771361 0000000001bd43b37dbcf53e80c81705efbab37a43fefe73c6b2febd39ff894f +1771390 000000000094c84c9042201d02a90e8e558d5963a31f11c7419875aaeab67760 +1771420 000000000077ec33fc13e4cf6c890a756d500657b7182293b6ba014079f37ea5 +1771449 000000000179c9f8112786a28205000945cf6b1de782c27dfc8e3a1e1a5019bb +1771473 0000000001865b5b39770b7eaedec401cee3d63278d2bd36a0e2ce0d574a4024 +1771499 00000000015aa20995643dbab8647ba2b995bdf4446c26b972f08b51cd616dc5 +1771526 0000000001be2042721e51dd87e1f42f47e2e8ef7c24e92b4e3cb2ece190355c +1771560 0000000000304e59149ae478f2b2b277684351a603e3793ecbd8792d1f9f5151 +1771588 0000000000c360086da223c2798e5a955ff8387f7ba786af4d2447ba3418477d +1771617 0000000000590d855c6d7dfdefad9f66befa4924e4e90d704a36f8ef4fe28be8 +1771643 000000000122a8335b513abc680408024833e68755e9713b89564b08cbdd6285 +1771670 000000000196540d481e57948dc8f7597b14132f6e19bc93e5f6391fe2919788 +1771697 000000000095b5bee161cc5ff4507a493de110a0ddb2b75debc1d097674cf353 +1771722 00000000006ccba7b035c531f0946d5c8ce72829309b95c282e84a33de728214 +1771752 0000000000229b588ee5b722fa09a7c77e8483e08cedbb902740e401176577a4 +1771782 0000000000261f1b61f8f1a377c281b6fe7810674f72037b64e5a8697866d52e +1771817 00000000013dd2983ff4bf73bace47c629c1607a48f5f5a1afa551356d44c6e4 +1771845 000000000068b38f148c34dd1d275acc6fb7f066ec27d22e0898dbbfb4dcaa96 +1771874 000000000174ddb946fd0df62012289f958ae908eac30fc44e15106c9042d875 +1771905 00000000013ca23bd561dc4f35865310bb1c9c06f80dea91dbe0988344afec32 +1771943 0000000000bf6ecddcc345d1dc907a6a41f9400be1ef35b420cd459844267036 +1771976 00000000005b32ba467a39baaf2081b8f0f8cb04413d02fac3db428c8219afd6 +1772015 00000000018de706c81cd8d0d9b6de9970095ff8604e3e5479a9930217bc661e +1772042 0000000000a0103b0db70a16f34b0b8affda6cc4972b4c0a75a04c81e4230824 +1772065 00000000017413e8fdcffca44c109da1e5461ecaa9dd5765b495d35a1241330e +1772098 0000000001c2dd377831e08dba6f8ef93032445a5cae6deab5dfac8e501f45db +1772133 0000000001771579307cf5aae72b4cfa8618ccc3822b77a310045f8c84a75e93 +1772166 0000000000f77582ee16b57017fd9b62c7a51d259b843bec07afa9c2159e0f3e +1772214 0000000000f747464ce18e66208e5a9ad7b31b3de57e8cbf84491a93b55e9f33 +1772252 000000000188451a4f89a7f6b0ac0ec0e62992a74c88cf23305a6bed59a1bf1a +1772296 0000000000bf3af1d5f812d32eae30eb1aa52c279a64d8ab80444d39d20a46db +1772319 0000000000f6aac7703242c95360db536ceff0de40f30bd17ad3aabe0ee15b7c +1772350 0000000000598b7e0496bb96d74d2a592769b39b69ed634baaaa29957f8a94a4 +1772380 00000000014cc1473b8d2d654908c111e7e3d43ae618b78b374ba579d28e55d8 +1772409 0000000000441f6d3c35493c4554afea262860c160cee7c19fe0429e4a4518b6 +1772441 000000000036adffe79865a304890f29e08505d577ca39b7829c331ab573e766 +1772473 000000000021cf7d19fb05a2070aded9e81e6e55d4470cee81fff8fbc70cadf4 +1772500 
0000000001109c6049218a78d26eb16b0d5e6a46aa1918b39e8adb9c8d892320 +1772534 00000000002f8d07b654365736e61fb889110ae9393731da3ad7a781baae75b3 +1772567 000000000019b0830be65ff17950249b4b9ab711721ac8863f85815d5a0a92b7 +1772595 0000000000039bfc80bb8bb46d7994bda7d7ed4c30f7f21f6e1db9288a91492e +1772628 00000000018541e18c66268cbabba66d58d9549fdad4fe0db5e8aeae45d5ca92 +1772658 0000000000ec541fe3ec80cb86715f3235c48d93160916d4e390f249ec3dd989 +1772690 0000000000bcd36ece18f78d5755b527c0e10221cc951229bd6e93bf52f1156c +1772717 0000000000d43a3ee737eb97a8ccee3a773d7c4670ede0ff40219e9567843254 +1772743 000000000005d85dd7f711ee961796406873783ccd39a9dbda4d960b004dd44f +1772771 000000000113288f12390b8a24f0d76503b36c7b817a2e18ba0c893305d344e5 +1772801 0000000000ddc25799af357dd3c17fedaa071a22ec097efe87122d175f109de1 +1772844 00000000011c88b3ddb6e7cf08153ff4b56eacfe7bf8529afcf6038b76ab1d15 +1772885 0000000000b3eb9c231c858114dd1c1bb18e9f95c44a8d3f0ba7570a96afc3c4 +1772924 00000000016a3856c9af21d6b6ba1d5388d364da383ac0cdbc4da98ca04503c0 +1772969 000000000029a2075b7d36fe55e8c28587d4030686b9516bcb382c5a94b26871 +1773005 0000000000aec3502a867c81de6648af673b6d9eb70ccec89f94dca2ade1484f +1773045 00000000015adff51c9d39ca39a37792cbef07f53538ab90ac15348dbf5d2dc8 +1773073 00000000013930d7d846e84bc5d1b019307645676c4ba4c37259a2b6f30cc123 +1773113 0000000000a844190dd60f1b1a035cdce992967fce597a388a55b28a0861831c +1773163 0000000000111914a3074d3726811c9334c2d5899f6339be0e3774d0a2cdfffc +1773212 000000000179fbe06ea5820bef4f784080cff93a53ea66aa5db7681f973b8d8e +1773284 000000000114fc4535334216db28c8ca767c0fa23e5d57bad0a1ad60431ff309 +1773358 000000000169d5b572afb180e591e5d49c24986bf3640201c7943d3ab0fdf2f0 +1773435 00000000013a256f9176923c4b2e8679789fe8660f89af6694e355fcb1ca28cf +1773465 00000000002b150850e37f0ca259fa7d5ce87f6881f0caadc4beb512e9684e65 +1773495 000000000075f1e2797706887fdf5a273ba895a66b954fdb9087763afb2598ba +1773541 0000000000796ad1f0b4adea91ac7ddbb137f68846bb91a2b6e6a8e64a346042 +1773578 0000000000a4632ed23d089be03d9866546ee2a4f68449059c5da993a086b445 +1773610 000000000002db5c95656c6198135f8ef12f290aab45fda5c5143804e3ac8d18 +1773642 000000000132920ae54dd4873aed1d4ce506e13105193a60d3f784a8764c6f2b +1773679 0000000001b1d0fbc7dca2273617aac3eb9638a7ad8c4dec132befe0cd791390 +1773716 00000000016ffe7f8fcbc31600729ddee0dab08a31db8233215343d048ab30ca +1773745 000000000104fada50ba168cb7a04880fd337828f49a46401188bf0d53db1b58 +1773778 000000000093ab8c398fcda421550e95027aaa9733f5420386bceef6363f6b8e +1773813 00000000015f9a0977bc13d944036fe7840cb1778ff990178441cb7e72945583 +1773845 0000000000a5c847037957e20cfd9663cb02965a1a08c6d1b1780f9df3ea49a0 +1773881 0000000000ee66752bb6e71a75b056d782cddb753824b285c001f2de740f738d +1773925 0000000000e3cacad302bd092ac4c52680ea1f9e20a02415d1b7184409049bcb +1773948 00000000003da0b283eded5b91253dfe6556c4020614075c3a63ff3f0d182e40 +1773991 00000000004ad3d83bd313eed9ccfd95952e82804cd3e94119aa7648d649ba8f +1774025 0000000001671c03f9d5c8133bd2357814a5c6027df242ad65241ecfbe359e0b +1774074 000000000135728c68a552c28a1ef14e08b9ba8eab89ecd58cae96bbf2b40799 +1774142 000000000040b735db9cb6aa60558dc7b13e5d389969df87241f9cf5b50b3039 +1774186 00000000016801601e314dfe18a5ef1a7239657d4819c487a52eba4c0f7ae487 +1774222 0000000001387dc1f3dab96d9ab38b86c9a6f1f36ba7e0adc07ee45ddb23df8c +1774269 00000000008029e4dfe2e9016bd1e5b13ffb5bd28225175169cf1fc7f5758cff +1774325 000000000120e23b33181f37c384538548a3388e7345a6c453484d68c1086fcf +1774387 0000000001261b5a55c76670804e5a6d82ac8f74f6445eccc0d22db03c519076 +1774433 
0000000000fad7cbf0f18b05b4ab3aa4acd0ad6533c2a62a1cc4529a8a8b7bf0 +1774482 0000000001675dd1948568bd2ba6517ca3a5c83a23e03e4d2a0d206b3dbc0597 +1774539 00000000017f17cdddb5950905a605961e6733ef0d0ca447dcbc35bdc573004d +1774590 0000000000b562c5765d212ab7d22ff4e71cb04bb7cb87aff941c0d3717b10f2 +1774642 000000000132da4944ea7656c264ddd8ba701b52eb27e30f2dd51763a481ebfd +1774703 0000000001209fc949110bae67b3c31f45f8f404ff466186319c64cfc53d22df +1774756 0000000000a02a0879f0e24f04d12701fcbb7e8892894a3544004a7aff232ac6 +1774826 000000000126ee9374474bf59b703713ebc49a03c02739156f8e0ae004dc66ab +1774866 0000000001632208ea95b42bbc38c9c6ab0e7e5ec5eb7356f54f301817d9788b +1774905 00000000003b787f90f11abe511c51f4f5b1b6e99a369e77c1415d49affaea95 +1774967 0000000000faae3cc98320f868feec66d6e375e3d95ed5745f111c9f28944c9c +1775021 0000000000cbdff1fa31c8a45439e73b570c3e1368035f6ee56bb90af65f971c +1775064 00000000007f4f3c1bc1254b53236d2e7974b373644ea6c2f405c27a9136826c +1775123 00000000004da571942eef4b5b949fbb0c54a1a022761150318fb8376a0b1f93 +1775169 000000000017e8a85dc7cb422ae23109ba75c363c37f4c81d420593e70440869 +1775215 0000000000b43eb11029ab0a0fa55ba49158ebf554f793f92ded315dd5f72032 +1775266 0000000001ac800bc2f8b709b086784070ef87ea16b228324035d46109b3f709 +1775317 000000000038dd45cf2f2da162880adb8952f5f312a3b48e7396474b893b6151 +1775363 0000000000fc4eb9a3296bac3016f741421fb213979df095693eabc046bd5847 +1775406 0000000001077289de75c6387c61e74caee6240a961ce242bb0b4cb962d7c725 +1775464 00000000011dd0da4df8136c8b171005dd6fd82dc1e3ab721abc48d9b604782e +1775517 00000000000fd383f863ed8d90e0e97ae84b40e52355b766fe893bfedece2268 +1775578 000000000089eb668b4affcebd8c415921998607ca2ce0cfcfa45fc790bf7a67 +1775639 0000000000be238867bb69cc961c32cabbfef35f85b84bb0d0c0bda00cc978e7 +1775706 00000000010d35b0a5a1a359a756610f7bc79a0fe7cbffcadbac6d501db6474f +1775744 0000000002260b190a2947179acd3e7e8204d31086dca7dd0a8b2818bf93e743 +1775780 00000000000bf0007a9d7691179a28de6aa4d2f54287fb1a9afdcaa5a6a9e2ae +1775813 00000000003669a23565610ecc828ccab846bd7aa0b5fb797aa38b4ee0f0b5b1 +1775843 00000000012c128dbc7abd797455fabc31f9c3b13025f98262acff57f44fd430 +1775871 000000000099c53c9f5bcbe682dc53ded35a2e7ad95d805b4412a786a8bc7192 +1775903 00000000014f16acc0b5149674d56dcab87482bc0e8c667205c30e8ee5d59797 +1775936 0000000000afab0923ec346845ecc5f8a4eca6bd5de52027b8231e56087a0b00 +1775965 00000000003e8ea54b63b80f802f3a4dbbc85e408c5353f3a7fdb5091a8aae46 +1775994 0000000001a425d9ce20b47af6e3ffab8c1fff42fd247f7daa537a6ecc1716ad +1776029 0000000001b115f960337bef9911199202c06991d815b76ebde5439aeebfd73b +1776063 00000000015bfb2623d1c4f58a6238839433f3afe0c9f1eb7675de10a9cd2996 +1776097 0000000000607aa22bbe0a3a7d8265bc10ef5ca86eeac27302d236f00145f16e +1776128 000000000169185e9d84489a0e6770fa12e5148f7f4efebd13a91b57941ca3c8 +1776157 0000000000931629a573dd9388ab1cf7b5d16c4f92dda13715102359c37a947b +1776185 0000000001b152e6c0754042a273c6eecf60b4bdbc57e399c1fe3270d428387a +1776214 0000000001b23991ba236736f15b3cfd819727337f67bc4fb0275bdc91534342 +1776243 0000000000058a40cfb4d2013634db41f0fc89d3be00c6c7a96dc4637e038bc5 +1776274 00000000011433a5b8b82d9ede50a6d993a8b1e98fa305c9514e0e4913ce568b +1776303 00000000007124f4e394da38ea202ca26c3fece3eafce1bbd357c49859d9023e +1776332 0000000000c083d3df5e223142f0250d392f67bee3e8ae96b0f4128ce9a24739 +1776369 000000000107a8f19ec7f145a69dd86b991b2a6fd00ec1234251c23202b1b63c +1776398 000000000113096f8697b20b06cf94643496a95cd1e3ee11f0b3585021d81131 +1776428 00000000017a3c57620983c31c828566e684e13f5390de96e3723cbb6a2346bc +1776454 
000000000128b6d4ee73446be2b6655c329715f03d74117b5905d736b6230532 +1776484 0000000000137642152694c14fc39e51eaf7b79583fa4d6fa51e66f0efe76b76 +1776518 0000000000e09ffa7cd99ab5eaff1544d6c1da97bb2af8eb6f6126de2a01ec88 +1776544 00000000019a6f80b4f7f058a6561299e572185dcb015746784be93e87ab6a1a +1776572 0000000000dcded444e23cb60c7059b38311829f68300e915b456b78a1c688f4 +1776607 0000000001774c8bf1386db3d1ca446a23188195c4199f7ffa77ef74a85be3ce +1776635 000000000065e85a333de29a42255fd5fa2b1b60a3d3ad43c60b80bdb058c4be +1776665 00000000013bc72730769473d71b98b386e65c616c1da2c2364b7520f10f8b69 +1776710 00000000013fb926d8087226534cff3fb32912209ff7eeace594fe0e381bc4f5 +1776742 000000000042d7818b3331fd08cf1d1cce3f36645b9ee642137100b7e8a2247b +1776773 00000000017155c6dc542cbda50b263fa6a780e4170dd18ae0e1fa1ecd5df1ce +1776806 0000000000962134edb0226c000d4d8bb4b5404bfeba48e91b815f5fd1e18ac0 +1776842 000000000134cc97a60a77c5e2b8ecdbe991a65978a271be54faa93c83159028 +1776870 000000000070e51fdf74f942a61e7bf69156ad90ae2b621b81694bc68f27bb1f +1776909 0000000001453511632985bfa1be2a21ae7eb73a2b12c08cd341f56d0fc942ff +1776943 0000000000baf28d3acab7faaedb1e4a4d560bc2f6e612fcd87bf09a9c428889 +1776980 00000000006e2a1b271fc5effaa64ad44530aeda8a5a565f3b039e8fea0f71bd +1777012 00000000001d67c76f1dee4077f4aa77ec1ec9b85b9c55211c0ad51eb069d3ec +1777047 00000000010919a9d2de116bb380d4954793f0230533f265b9a1e3db648a39d2 +1777083 0000000001b033b37fc9df66b8fadcf1a8b44ae5a64f244a64ff28a33b8dbecd +1777138 00000000015e6a75974fead78e4735605f4e7b76c64dfd094d4579315464a006 +1777178 0000000001a889eb6abc9d95417773f66eab40184868f5b8330eb23a2ab09d22 +1777215 00000000006c44aa2d86c44181f8f8d478fc410fbb6f3697d8ab92298c6baed9 +1777255 0000000000690c0c43a2d17a6a2361b27226d82f1ff496f7da0eec01e6d7d08c +1777304 00000000013892bbf9cbe3893e99250a142e87c2443c74d4a1fe41f81a1d85f4 +1777342 00000000012d7b855075af99c1a53ff7ffbd058c19dcc0afe07630feea852f21 +1777371 0000000001e3b44d75ac5e2d1a1a165e858a61bf5271caa797d2d1b3c6c96fb3 +1777421 0000000001c25c23ef85571fe0f78c818c0440db6143af5bb5b3c9824581ad66 +1777478 00000000020631eda70ad4f5e580a7ee0fe47f1cc88ed500113d677b728e71b8 From 5b3422c4a33ffeff077c4c9954093ad5e2edc6da Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 23 Aug 2022 09:31:04 +1000 Subject: [PATCH 02/42] Fix clippy::derive_partial_eq_without_eq in generated prost test code (#4927) --- zebrad/build.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zebrad/build.rs b/zebrad/build.rs index 20138ba89c1..62560642b1e 100644 --- a/zebrad/build.rs +++ b/zebrad/build.rs @@ -60,6 +60,10 @@ fn main() { tonic_build::configure() .build_client(true) .build_server(false) + // The lightwalletd gRPC types don't use floats or complex collections, + // so we can derive `Eq` as well as the default generated `PartialEq` derive. + // This fixes `clippy::derive_partial_eq_without_eq` warnings. 
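To see the hunk as a whole rather than line by line, here is a standalone sketch of the complete `tonic_build` call after this change. It reuses the proto paths from the diff, but the surrounding `main` function and error handling are assumptions for illustration, not the literal contents of `zebrad/build.rs`; it also assumes `tonic-build` is declared as a build dependency.

```rust
// Standalone sketch of the configure chain this hunk modifies (a hedged
// example, not the literal zebrad/build.rs). The "." path selector asks
// prost to add `#[derive(Eq)]` to every generated type, alongside the
// default generated `PartialEq` derive.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_client(true)
        .build_server(false)
        .type_attribute(".", "#[derive(Eq)]")
        .compile(
            &["tests/common/lightwalletd/proto/service.proto"],
            &["tests/common/lightwalletd/proto"],
        )?;
    Ok(())
}
```

The diff itself continues with the new attribute line: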
+        .type_attribute(".", "#[derive(Eq)]")
         .compile(
             &["tests/common/lightwalletd/proto/service.proto"],
             &["tests/common/lightwalletd/proto"],

From 52fa867cb834b51d2a69edb5200b7a946e02323a Mon Sep 17 00:00:00 2001
From: teor
Date: Tue, 23 Aug 2022 13:43:18 +1000
Subject: [PATCH 03/42] change(ci): Disable beta Rust tests and add parameter
 download logging (#4930)

* Apply the same Rust logging settings to all GitHub workflows

* Enable full optimisations in dev builds for downloading large parameter files

* Disable beta Rust tests in CI
---
 .../workflows/build-crates-individually.yml |  7 +++---
 .github/workflows/build-docker-image.yml    |  1 +
 .../workflows/continous-integration-os.yml  |  6 ++++-
 .github/workflows/coverage.yml              |  1 +
 .github/workflows/docs.yml                  |  6 +++++
 .github/workflows/lint.yml                  |  7 ++++++
 Cargo.toml                                  | 22 ++++++++++++++++---
 7 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml
index 4e65f6b8ad8..501bd904761 100644
--- a/.github/workflows/build-crates-individually.yml
+++ b/.github/workflows/build-crates-individually.yml
@@ -25,6 +25,7 @@ on:
 
 env:
   CARGO_INCREMENTAL: 0
+  RUST_LOG: info
   RUST_BACKTRACE: full
   RUST_LIB_BACKTRACE: full
   COLORBT_SHOW_HIDDEN: '1'
@@ -48,11 +49,11 @@ jobs:
       # This step is meant to dynamically create a JSON containing the values of each crate
       # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
       #
-      # The result from `cargo tree` is then transformed to JSON values between double quotes,
+      # The result from `cargo tree` is then transformed to JSON values between double quotes,
       # and separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
       #
      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an output
-      # named `matrix`, which is then used as the input in following steps,
+      # named `matrix`, which is then used as the input in following steps,
      # using ` ${{ fromJson(needs.matrix.outputs.matrix) }}`
       - id: set-matrix
         name: Dynamically build crates JSON
@@ -104,7 +105,7 @@
 
       # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
       # but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
-      #
+      #
       # Some Zebra crates do not have any features, and most don't have any default features.
- name: Build ${{ matrix.crate }} crate with no default features uses: actions-rs/cargo@v1.0.3 diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 818cbaa50d8..ba570bb51c3 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -36,6 +36,7 @@ on: rust_log: required: false type: string + default: info jobs: build: diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 18614b51c64..3389fd22906 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -36,6 +36,7 @@ on: env: CARGO_INCREMENTAL: 0 + RUST_LOG: info RUST_BACKTRACE: full RUST_LIB_BACKTRACE: full COLORBT_SHOW_HIDDEN: '1' @@ -54,11 +55,14 @@ jobs: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 os: [ubuntu-latest, macos-latest] rust: [stable, beta] + exclude: + # TODO: re-enable beta Rust tests on ubuntu (#4929) + - os: ubuntu-latest + rust: beta # We're excluding macOS for the following reasons: # - the concurrent macOS runner limit is much lower than the Linux limit # - macOS is slower than Linux, and shouldn't have a build or test difference with Linux # - macOS is a second-tier Zebra support platform - exclude: - os: macos-latest rust: beta diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index cfa4e03708d..ae28188d5a4 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -30,6 +30,7 @@ on: env: CARGO_INCREMENTAL: 0 + RUST_LOG: info RUST_BACKTRACE: full RUST_LIB_BACKTRACE: full COLORBT_SHOW_HIDDEN: '1' diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index f989f57b488..4d53c9a1459 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -17,6 +17,12 @@ on: # workflow definitions - '.github/workflows/docs.yml' +env: + RUST_LOG: info + RUST_BACKTRACE: full + RUST_LIB_BACKTRACE: full + COLORBT_SHOW_HIDDEN: '1' + jobs: build: name: Build and Deploy Docs (+beta) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index cac408bb78e..56090ef490b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -10,6 +10,13 @@ on: branches: - 'main' +env: + CARGO_INCREMENTAL: 0 + RUST_LOG: info + RUST_BACKTRACE: full + RUST_LIB_BACKTRACE: full + COLORBT_SHOW_HIDDEN: '1' + jobs: changed-files: runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index e2958c15afe..de480f72190 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,8 @@ panic = "abort" # Speed up tests by optimizing performance-critical crates +# Cryptographic crates + [profile.dev.package.blake2b_simd] opt-level = 3 @@ -41,11 +43,25 @@ opt-level = 3 [profile.dev.package.bls12_381] opt-level = 3 -[profile.dev.package.minreq] -opt-level = 1 +# Cryptographic and parameter download crates [profile.dev.package.zcash_proofs] -opt-level = 1 +opt-level = 3 + +[profile.dev.package.minreq] +opt-level = 3 + +[profile.dev.package.rustls] +opt-level = 3 + +[profile.dev.package.ring] +opt-level = 3 + +[profile.dev.package.spin] +opt-level = 3 + +[profile.dev.package.untrusted] +opt-level = 3 [profile.release] From 4ecaefed72aea7e8a4638dae0bbfe430a19e3d87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Aug 2022 11:04:41 +0000 Subject: [PATCH 04/42] build(deps): bump w9jds/firebase-action from 2.2.2 to 11.5.0 (#4905) Bumps 
[w9jds/firebase-action](https://github.com/w9jds/firebase-action) from 2.2.2 to 11.5.0. - [Release notes](https://github.com/w9jds/firebase-action/releases) - [Commits](https://github.com/w9jds/firebase-action/compare/v2.2.2...v11.5.0) --- updated-dependencies: - dependency-name: w9jds/firebase-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 4d53c9a1459..ddff8841fd7 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -58,7 +58,7 @@ jobs: mdbook build book/ - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v2.2.2 + uses: w9jds/firebase-action@v11.5.0 with: args: deploy env: @@ -74,7 +74,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v2.2.2 + uses: w9jds/firebase-action@v11.5.0 with: args: deploy env: @@ -88,7 +88,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v2.2.2 + uses: w9jds/firebase-action@v11.5.0 with: args: deploy env: From 9fb87425b76ba3747985ea2f22043ff0276a03bd Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Tue, 23 Aug 2022 21:06:18 -0300 Subject: [PATCH 05/42] fix(tests): Update timeout for Zebra sync tests (#4918) * update timeout * update the doc comment * Increase test timeouts for Zebra update syncs * Stop failing the 1740k job if the cached state is after block 1740k Co-authored-by: teor --- .github/workflows/deploy-gcp-tests.yml | 5 +++-- zebrad/tests/common/launch.rs | 4 ++-- zebrad/tests/common/sync.rs | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 54c7899879c..9ee0f0f2426 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -629,7 +629,8 @@ jobs: '(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ " - # follow the logs of the test we just launched, up to block 1,740,000 (or the test finishing) + # follow the logs of the test we just launched, up to block 1,740,000 or later + # (or the test finishing) # # We chose this height because it was about 5 hours into the NU5 sync, at the end of July 2022. # This is a temporary workaround until we improve sync speeds. 
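The next hunk widens the height pattern that this log-following step greps for. As a minimal sketch (not part of the patch, using the `regex` crate, with invented sample log lines), the alternation accepts any `current_height` from 1,740,000 up to 2,999,999 before the `remaining_sync_blocks` field:

```rust
// Minimal sketch (not part of the patch): checking the height alternation
// used by the `grep --extended-regexp` call in the hunk below, written
// with the `regex` crate. The sample log lines are invented for
// illustration.
use regex::Regex;

fn main() {
    // 17[4-9][0-9]{4} matches 1,740,000..=1,799,999
    // 1[8-9][0-9]{5}  matches 1,800,000..=1,999,999
    // 2[0-9]{6}       matches 2,000,000..=2,999,999
    let progress = Regex::new(
        r"estimated progress.*current_height.*=.*(17[4-9][0-9]{4}|1[8-9][0-9]{5}|2[0-9]{6}).*remaining_sync_blocks",
    )
    .expect("hard-coded regex is valid");

    // A height past the 1,740,000 target matches...
    assert!(progress.is_match(
        "estimated progress current_height = 1752038 remaining_sync_blocks = 900"
    ));
    // ...but a height just below it does not.
    assert!(!progress.is_match(
        "estimated progress current_height = 1739999 remaining_sync_blocks = 901"
    ));
}
```

The workflow's version also keeps a `(test result:.*finished in)` alternative, so the `grep --max-count=1` exits as soon as either the sync passes the target height or the test finishes.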
@@ -683,7 +684,7 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*current_height.*=.*174[0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ + '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)||(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ " # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing) diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 075e5d23664..3c27a3039df 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -48,12 +48,12 @@ pub const BETWEEN_NODES_DELAY: Duration = Duration::from_secs(2); /// and `zebrad` takes about 30 minutes to update to the tip. /// /// TODO: reduce to 20 minutes when `zebrad` sync performance improves -pub const LIGHTWALLETD_UPDATE_TIP_DELAY: Duration = Duration::from_secs(60 * 60); +pub const LIGHTWALLETD_UPDATE_TIP_DELAY: Duration = Duration::from_secs(11 * 60 * 60); /// The amount of time we wait for lightwalletd to do a full sync to the tip. /// /// See [`LIGHTWALLETD_UPDATE_TIP_DELAY`] for details. -pub const LIGHTWALLETD_FULL_SYNC_TIP_DELAY: Duration = Duration::from_secs(150 * 60); +pub const LIGHTWALLETD_FULL_SYNC_TIP_DELAY: Duration = Duration::from_secs(11 * 60 * 60); /// The amount of extra time we wait for Zebra to sync to the tip, /// after we ignore a lightwalletd failure. diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index ffb34f9e18e..a9a8d283bd8 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -363,8 +363,8 @@ pub fn create_cached_database_height( ) -> Result<()> { eprintln!("creating cached database"); - // 16 hours - let timeout = Duration::from_secs(60 * 60 * 16); + // 20 hours + let timeout = Duration::from_secs(60 * 60 * 20); // Use a persistent state, so we can handle large syncs let mut config = cached_mandatory_checkpoint_test_config()?; From bcc325d7f8b337fe40adb9051789b737730e883e Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 23 Aug 2022 23:49:55 -0400 Subject: [PATCH 06/42] ci(auth): retry GCP authentication if fails (#4940) Previous behavior: Sometimes Google Cloud authentication fails; this might happen before IAM permissions are fully propagated. Expected behavior: If the authentication fails, retry at least 3 times before exiting with a non-zero exit code. Applied solution: Google GitHub Actions for auth recently added a `retries` feature, which is now implemented to work around this issue.
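For reference, the updated step looks like this once the new input is in place; this is a sketch assembled from the diffs below, using the same provider and service account values these workflows already use:

```yaml
# Authenticate to Google Cloud, retrying up to 3 times if authentication fails
# (for example, when IAM permissions have not fully propagated yet).
- name: Authenticate to Google Cloud
  id: auth
  uses: google-github-actions/auth@v0.8.0
  with:
    retries: '3'
    workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
    service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
    token_format: 'access_token'
```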
Note: https://github.com/google-github-actions/auth/commit/95a6bc2a27ae409a01ea58dd0732eccaa088ec07 Fixes https://github.com/ZcashFoundation/zebra/issues/4846 --- .github/workflows/build-docker-image.yml | 1 + .github/workflows/continous-delivery.yml | 2 ++ .github/workflows/continous-integration-docker.yml | 1 + .github/workflows/delete-gcp-resources.yml | 1 + .github/workflows/deploy-gcp-tests.yml | 11 +++++++++++ .github/workflows/zcash-lightwalletd.yml | 1 + .github/workflows/zcashd-manual-deploy.yml | 1 + 7 files changed, 18 insertions(+) diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index ba570bb51c3..9b337a1a29d 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -83,6 +83,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 7bb1bb17bee..51401205f83 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -56,6 +56,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -120,6 +121,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 14fdd108d4a..4b6a0e4f294 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -87,6 +87,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/delete-gcp-resources.yml index 6cb58f7a857..44a994b347c 100644 --- a/.github/workflows/delete-gcp-resources.yml +++ b/.github/workflows/delete-gcp-resources.yml @@ -18,6 +18,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 9ee0f0f2426..cae43077d24 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -120,6 +120,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 
'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -187,6 +188,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -241,6 +243,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -369,6 +372,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -494,6 +498,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -552,6 +557,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -606,6 +612,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -664,6 +671,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -718,6 +726,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -774,6 +783,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' @@ -831,6 +841,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git 
a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index f04f032f986..959c76eb15e 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -101,6 +101,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' diff --git a/.github/workflows/zcashd-manual-deploy.yml b/.github/workflows/zcashd-manual-deploy.yml index 0099b307509..515cee44ce1 100644 --- a/.github/workflows/zcashd-manual-deploy.yml +++ b/.github/workflows/zcashd-manual-deploy.yml @@ -42,6 +42,7 @@ jobs: id: auth uses: google-github-actions/auth@v0.8.0 with: + retries: '3' workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' token_format: 'access_token' From c87073ed44ac78729af88df9e60dd66938e5e653 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Aug 2022 10:45:36 +0000 Subject: [PATCH 07/42] build(deps): bump tj-actions/changed-files from 24 to 29.0.0 (#4936) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 24 to 29.0.0. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v24...v29.0.0) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 56090ef490b..9f1be10a969 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v24 + uses: tj-actions/changed-files@v29.0.0 with: files: | **/*.rs @@ -44,7 +44,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v24 + uses: tj-actions/changed-files@v29.0.0 with: files: | .github/workflows/*.yml From 357dfee5140a6650b9abc0c3bdfaeccd6e34469a Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 25 Aug 2022 04:36:51 +1000 Subject: [PATCH 08/42] Update supported Rust versions in README (#4938) --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cfcf821c2dd..3e2236e4d1f 100644 --- a/README.md +++ b/README.md @@ -80,8 +80,9 @@ for your platform: 1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install). - Zebra is tested with the latest `stable` Rust version. - Earlier versions are not supported or tested, but they might work. - (Rust 1.57 and earlier are not supported, due to missing features.) + Earlier versions are not supported or tested. + Any Zebra release can remove support for older Rust versions, without any notice. + (Rust 1.59 and earlier are definitely not supported, due to missing features.) 2. 
Install Zebra's build dependencies: - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` From 4282af46909fbd37edc451f889711cb8ada17d66 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 25 Aug 2022 08:23:44 +1000 Subject: [PATCH 09/42] ci(mergify): increase batch sizes (#4947) * Increase Mergify batch sizes * Use the maximum mergify batch size, except for critical priority PRs --- .github/mergify.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index c01839d05ca..687cbfcfc55 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -5,7 +5,7 @@ queue_rules: allow_inplace_checks: True allow_checks_interruption: False speculative_checks: 1 - batch_size: 5 + batch_size: 8 # Wait a short time to embark hotfixes together in a merge train batch_max_wait_time: "2 minutes" conditions: @@ -18,9 +18,9 @@ queue_rules: allow_inplace_checks: True allow_checks_interruption: True speculative_checks: 1 - batch_size: 5 + batch_size: 20 # Wait for a few minutes to embark high priority tickets together in a merge train - batch_max_wait_time: "5 minutes" + batch_max_wait_time: "10 minutes" conditions: - base=main @@ -28,9 +28,9 @@ queue_rules: allow_inplace_checks: True allow_checks_interruption: True speculative_checks: 1 - batch_size: 5 + batch_size: 20 # Wait a bit longer to embark low priority tickets together in a merge train - batch_max_wait_time: "10 minutes" + batch_max_wait_time: "20 minutes" conditions: - base=main @@ -85,4 +85,4 @@ pull_request_rules: actions: queue: name: low - method: squash \ No newline at end of file + method: squash From 7fc3cdd2b219d38eb7bdefb611a5924d52974093 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 25 Aug 2022 16:41:45 +1000 Subject: [PATCH 10/42] Increase CI disk size to 200GB (#4945) --- .github/workflows/deploy-gcp-tests.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index cae43077d24..0cdea53bd2a 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -130,9 +130,9 @@ jobs: id: create-instance run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 100GB \ + --boot-disk-size 200GB \ --boot-disk-type pd-ssd \ - --create-disk name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=100GB,type=pd-ssd \ + --create-disk name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=200GB,type=pd-ssd \ --container-image debian:buster \ --container-restart-policy=never \ --machine-type ${{ env.MACHINE_TYPE }} \ @@ -313,9 +313,9 @@ jobs: id: create-instance run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 100GB \ + --boot-disk-size 200GB \ --boot-disk-type pd-ssd \ - --create-disk image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=100GB,type=pd-ssd \ + --create-disk image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ 
inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=200GB,type=pd-ssd \ --container-image debian:buster \ --container-restart-policy=never \ --machine-type ${{ env.MACHINE_TYPE }} \ From 0a39011b8868d6275f546b71c0ccd4b0b03c58f0 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 25 Aug 2022 23:09:20 +1000 Subject: [PATCH 11/42] fix(ci): Write cached state images after update syncs, and use the latest image from any commit (#4949) * Save cached state on full syncs and updates * Add an -update suffix to CI images created by updating cached state * Make disk image names unique by adding a time suffix * Use the latest image from any branch, but prefer the current commit if available * Document Zebra's continuous integration tests * Fix typos in environmental variable names * Expand documentation * Fix variable name typo * Fix shell syntax --- .../continous-integration-docker.yml | 10 ++- .github/workflows/deploy-gcp-tests.yml | 72 +++++++++++++++---- book/src/dev/continuous-integration.md | 26 +++++++ 3 files changed, 87 insertions(+), 21 deletions(-) create mode 100644 book/src/dev/continuous-integration.md diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 4b6a0e4f294..e15a46896b1 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -330,11 +330,10 @@ jobs: test_description: Test syncing to tip with a Zebra tip state test_variables: '-e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true - # TODO: do we want to update the disk on every PR, to increase CI speed? - saves_to_disk: false + # update the disk on every PR, to increase CI speed + saves_to_disk: true disk_suffix: tip root_state_path: '/var/cache' - # TODO: do we also want to test the `zebrad` part of the `lwd-cache`? (But not update it.) zebra_state_dir: 'zebrad-cache' # Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state @@ -403,7 +402,6 @@ jobs: # to also run on Mergify head branches, # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - # TODO: this test is unreliable, in the meanwhile we'll only generate a new lwd cached state when a full sync is also triggered if: ${{ (!cancelled() && !failure() && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true') || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) }} with: app_name: lightwalletd @@ -438,8 +436,8 @@ jobs: test_variables: '-e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true - # TODO: do we want to update the disk on every PR, to increase CI speed? 
- saves_to_disk: false + # update the disk on every PR, to increase CI speed + saves_to_disk: true disk_prefix: lwd-cache disk_suffix: tip root_state_path: '/var/cache' diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 0cdea53bd2a..f6d54524dac 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -59,6 +59,12 @@ on: required: false type: boolean description: 'Does the test use Lightwalletd and Zebra cached state?' + # main branch states can be outdated and slower, but they can also be more reliable + prefer_main_cached_state: + required: false + type: boolean + default: false + description: 'Does the test prefer to use a main branch cached state?' saves_to_disk: required: true type: boolean @@ -259,7 +265,10 @@ jobs: # - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not # # If there are multiple disks: - # - prefer images generated from this branch, then the `main` branch, then any other branch + # - prefer images generated from this branch and commit, then + # - if prefer_main_cached_state is true, prefer images from the `main` branch, then + # - use images from any other branch. + # Within each of these categories: # - prefer newer images to older images # # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable @@ -278,31 +287,46 @@ jobs: # Try to find an image generated from this branch and commit # Fields are listed in the "Create image from state disk" step - BRANCH_DISK_NAME="${DISK_PREFIX}-${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" - CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${BRANCH_DISK_NAME}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT} Disk: $CACHED_DISK_NAME" + COMMIT_DISK_PREFIX="${DISK_PREFIX}-${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" + COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT} Disk: $COMMIT_CACHED_DISK_NAME" + if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" + fi - if [[ -z "$CACHED_DISK_NAME" ]]; then - # Try to find an image generated from the main branch - CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "main Disk: $CACHED_DISK_NAME" + # Try to find an image generated from the main branch + MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "main Disk: $MAIN_CACHED_DISK_NAME" + if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')" fi + # Try to find an image generated from any other branch + ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "any branch 
Disk: $ANY_CACHED_DISK_NAME" + if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')" + fi + + # Select a cached disk based on the job settings + CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME" + if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then + echo "Preferring main branch cached state to other branches..." + CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME" + fi if [[ -z "$CACHED_DISK_NAME" ]]; then - # Try to find an image generated from any other branch - CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "any branch Disk: $CACHED_DISK_NAME" + CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME" fi if [[ -z "$CACHED_DISK_NAME" ]]; then echo "No cached state disk available" - echo "Expected ${BRANCH_DISK_NAME}" - echo "Also searched for any commit on main, and any commit on any branch" + echo "Expected ${COMMIT_DISK_PREFIX}" + echo "Also searched for cached disks from other branches" echo "Cached state test jobs must depend on the cached state rebuild job" exit 1 fi - echo "Description: $(gcloud compute images describe $CACHED_DISK_NAME --format='value(DESCRIPTION)')" + echo "Selected Disk: $CACHED_DISK_NAME" echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> $GITHUB_ENV echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> $GITHUB_ENV @@ -956,6 +980,23 @@ jobs: SYNC_HEIGHT=$(echo $DOCKER_LOGS | grep -oE '${{ inputs.height_grep_text }}\([0-9]+\)' | grep -oE '[0-9]+' | tail -1 || [[ $? == 1 ]]) echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> $GITHUB_ENV + # Sets the $UPDATE_SUFFIX env var to "-update" if using cached state, + # and the empty string otherwise. + # + # Also sets a unique date and time suffix $TIME_SUFFIX. + - name: Set update and time suffixes + run: | + UPDATE_SUFFIX="" + + if [[ "${{ inputs.needs_zebra_state }}" == "true" ]]; then + UPDATE_SUFFIX="-update" + fi + + TIME_SUFFIX=$(date '+%Y-%m-%d-%H-%M-%S' --utc) + + echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> $GITHUB_ENV + echo "TIME_SUFFIX=$TIME_SUFFIX" >> $GITHUB_ENV + # Create an image from disk that will be used for following/other tests # This image can contain: # - Zebra cached state @@ -966,7 +1007,8 @@ jobs: # used by the container - name: Create image from state disk run: | - gcloud compute images create ${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }} \ + gcloud compute images create \ + "${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}$UPDATE_SUFFIX-$TIME_SUFFIX" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ env.ZONE }} \ diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md new file mode 100644 index 00000000000..16089ff9da4 --- /dev/null +++ b/book/src/dev/continuous-integration.md @@ -0,0 +1,26 @@ +# Zebra Continuous Integration + +Zebra has extensive continuous integration tests for node syncing and `lightwalletd` integration. 
+ +On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml): +- Zebra update syncs from a cached state Google Cloud tip image +- lightwalletd full syncs from a cached state Google Cloud tip image +- lightwalletd update syncs from a cached state Google Cloud tip image +- lightwalletd integration with Zebra JSON-RPC and Light Wallet gRPC calls + +When a PR is merged to the `main` branch, we also run a Zebra full sync test from genesis. + +Some Docker tests are stateful, they can depend on: +- built Zebra and `lightwalletd` docker images +- cached state images in Google cloud +- jobs that launch Google Cloud instances for each test +- multiple jobs that follow the logs from Google Cloud (to work around the 6 hour GitHub actions limit) +- a final "Run" job that checks the exit status of the Rust acceptance test + +To support this test state, some Docker tests depend on other tests finishing first. + +Currently, each Zebra and lightwalletd sync updates the cached images, which are shared by all tests. +Tests prefer the latest image generated from the same branch and commit. But if they are not available, they will use the latest image from any branch and commit, as long as the state version is the same. + +Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-os.yml) on tier 2 platforms using GitHub actions runners. + From 5866fc339a1a4bf85b954961bcde3dfbd45b5f01 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 26 Aug 2022 06:23:26 +1000 Subject: [PATCH 12/42] Increase disk and network requirements for long-term deployment (#4948) --- README.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 3e2236e4d1f..7c50fd1a7bc 100644 --- a/README.md +++ b/README.md @@ -105,8 +105,8 @@ cargo install --features= ... The recommended requirements for compiling and running `zebrad` are: - 4+ CPU cores - 16+ GB RAM -- 100 GB+ available disk space for building binaries and storing cached chain state -- 100+ Mbps network connections +- 300 GB+ available disk space for building binaries and storing cached chain state +- 100+ Mbps network connection, with 100+ GB of uploads and downloads per month We continuously test that our builds and tests pass on: @@ -157,7 +157,7 @@ If this is a problem for you, please [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) `zebrad`'s typical mainnet network usage is: -- Initial sync: 40 GB download (in the longer term, several hundred GB are likely to be downloaded). +- Initial sync: 50 GB download, we expect the initial download to grow to hundreds of gigabytes over time - Ongoing updates: 10 MB - 1 GB upload and download per day, depending on user-created transaction size, and peer requests Zebra also performs an initial sync every time its internal database version changes. @@ -174,11 +174,10 @@ See our [roadmap](#future-work) for details. ### Disk Usage -Zebra uses up to 40 GB of space for cached mainnet data, -and 10 GB of space for cached testnet data. +Zebra uses around 100 GB of space for cached mainnet data, and 10 GB of space for cached testnet data. +We expect disk usage to grow over time, so we recommend reserving at least 300 GB for mainnet nodes. -RocksDB cleans up outdated data periodically, -and when the database is closed and re-opened. 
+RocksDB cleans up outdated data periodically, and when the database is closed and re-opened. #### Disk Troubleshooting From aa3b0af15c98bc24ae96162494b23cc677710257 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 26 Aug 2022 13:31:10 +1000 Subject: [PATCH 13/42] Fix a regular expression typo in a full sync job (#4950) --- .github/workflows/deploy-gcp-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index f6d54524dac..f21739d239e 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -716,7 +716,7 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)||(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ + '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ " # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing) From 9f2ab39968ff696c8b760315d64ffa3e9b599aed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 15:48:44 +0000 Subject: [PATCH 14/42] build(deps): bump tj-actions/changed-files from 29.0.0 to 29.0.1 (#4959) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 29.0.0 to 29.0.1. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v29.0.0...v29.0.1) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9f1be10a969..668c3229a22 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v29.0.0 + uses: tj-actions/changed-files@v29.0.1 with: files: | **/*.rs @@ -44,7 +44,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v29.0.0 + uses: tj-actions/changed-files@v29.0.1 with: files: | .github/workflows/*.yml From 326ae04b0f2fb28e9484e58a2936ac02d4565fb0 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 26 Aug 2022 14:06:32 -0400 Subject: [PATCH 15/42] ci(test): run build and test jobs on cargo and clippy config changes (#4941) Previous behavior: If warnings or errors are added in `.cargo/config.toml` or `clippy.toml`, and those could generate CI failures, we wouldn't catch the new failures, as the pipelines are not run when these files are changed. Expected behavior: If warnings or errors are added in `.cargo/config.toml` or `clippy.toml`, run all the build and test jobs which also track a `Cargo.toml`. Solution: Add `.cargo/config.toml` and `clippy.toml` as paths to all the required jobs which need to be triggered when these files change. Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/build-crates-individually.patch.yml | 3 +++ .github/workflows/build-crates-individually.yml | 6 ++++++ .github/workflows/continous-integration-docker.patch.yml | 3 +++ .github/workflows/continous-integration-docker.yml | 3 +++ .github/workflows/continous-integration-os.patch.yml | 2 ++ .github/workflows/continous-integration-os.yml | 3 +++ .github/workflows/coverage.patch.yml | 3 +++ .github/workflows/coverage.yml | 3 +++ 8 files changed, 26 insertions(+) diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml index f78e69a2731..2fdc44905ac 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/build-crates-individually.patch.yml @@ -10,6 +10,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 501bd904761..57b25707fe1 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -11,6 +11,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' pull_request: @@ -20,6 +23,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/continous-integration-docker.patch.yml index b96f03e6b01..3f4bd9b3897 100644 --- a/.github/workflows/continous-integration-docker.patch.yml +++ 
b/.github/workflows/continous-integration-docker.patch.yml @@ -14,6 +14,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'docker/**' - '.github/workflows/continous-integration-docker.yml' diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index e15a46896b1..6fc73bf7773 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -33,6 +33,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'docker/**' - '.github/workflows/continous-integration-docker.yml' diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index ef965a6433a..8f9018b52d2 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -9,6 +9,8 @@ on: - '**/Cargo.toml' - '**/Cargo.lock' - '**/deny.toml' + - '.cargo/config.toml' + - '**/clippy.toml' - '.github/workflows/continous-integration-os.yml' jobs: diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 3389fd22906..0bf90b8daab 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -17,6 +17,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/ci.yml' pull_request: diff --git a/.github/workflows/coverage.patch.yml b/.github/workflows/coverage.patch.yml index fdf9a5f6a7a..241f92e73e5 100644 --- a/.github/workflows/coverage.patch.yml +++ b/.github/workflows/coverage.patch.yml @@ -8,6 +8,9 @@ on: - '**/*.snap' - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' - 'codecov.yml' - '.github/workflows/coverage.yml' diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ae28188d5a4..230afd6847e 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,6 +15,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'codecov.yml' - '.github/workflows/coverage.yml' From 1d861b0d20b79cb9d2e9f66e54cd0f0fd5860a50 Mon Sep 17 00:00:00 2001 From: teor Date: Sun, 28 Aug 2022 05:42:20 +1000 Subject: [PATCH 16/42] fix(ci): Increase full sync timeouts for longer syncs (#4961) * Increase full sync timeout to 24 hours Expected sync time is ~21 hours as of August 2022. * Split final checkpoint job into two smaller jobs to avoid timeouts Also make regexes easier to read. 
* Fix a job name typo --- .github/workflows/deploy-gcp-tests.yml | 86 +++++++++++++++++++++++--- zebrad/tests/common/sync.rs | 4 +- 2 files changed, 81 insertions(+), 9 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index f21739d239e..e8da122c6a2 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -547,7 +547,12 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Sapling)|(estimated progress.*network_upgrade.*=.*Blossom)|(estimated progress.*network_upgrade.*=.*Heartwood)|(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Sapling' \ + -e 'estimated progress.*network_upgrade.*=.*Blossom' \ + -e 'estimated progress.*network_upgrade.*=.*Heartwood' \ + -e 'estimated progress.*network_upgrade.*=.*Canopy' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) @@ -602,7 +607,9 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Canopy' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to NU5 activation (or the test finishing) @@ -657,14 +664,14 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to block 1,740,000 or later # (or the test finishing) # # We chose this height because it was about 5 hours into the NU5 sync, at the end of July 2022. - # This is a temporary workaround until we improve sync speeds. logs-1740k: name: Log ${{ inputs.test_id }} test (1740k) needs: [ logs-canopy ] @@ -716,13 +723,77 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ + -e 'estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ + " + + # follow the logs of the test we just launched, up to block 1,760,000 or later + # (or the test finishing) + # + # We chose this height because it was about 9 hours into the NU5 sync, at the end of August 2022. 
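As a quick sanity check of these height patterns, here is a hypothetical one-liner; the log line is invented, but it has the same shape as zebrad's sync progress logs, and it shows why any height from 1,760,000 up matches:

```sh
# The 17[6-9][0-9]{4} alternative matches heights 1,760,000-1,799,999;
# the other alternatives cover 1,8xx,xxx-1,9xx,xxx and 2,xxx,xxx heights.
echo 'estimated progress to chain tip: current_height = Height(1761234), remaining_sync_blocks = 54321' | \
  grep --extended-regexp \
    -e 'estimated progress.*current_height.*=.*17[6-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks'
```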
+ logs-1760k: + name: Log ${{ inputs.test_id }} test (1760k) + needs: [ logs-1740k ] + # If the previous job fails, we still want to show the logs. + if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + retries: '3' + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Show recent logs, following until block 1,760,000 (or the test finishes) + - name: Show logs for ${{ inputs.test_id }} test (1760k) + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'estimated progress.*current_height.*=.*17[6-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing) logs-checkpoint: name: Log ${{ inputs.test_id }} test (checkpoint) - needs: [ logs-1740k ] + needs: [ logs-1760k ] # If the previous job fails, we still want to show the logs. if: ${{ !cancelled() }} runs-on: ubuntu-latest @@ -773,7 +844,8 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(verified final checkpoint)|(test result:.*finished in)' \ + -e 'verified final checkpoint' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, until it finishes diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index a9a8d283bd8..494f79ddce8 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -363,8 +363,8 @@ pub fn create_cached_database_height( ) -> Result<()> { eprintln!("creating cached database"); - // 20 hours - let timeout = Duration::from_secs(60 * 60 * 20); + // 24 hours + let timeout = Duration::from_secs(24 * 60 * 60); // Use a persistent state, so we can handle large syncs let mut config = cached_mandatory_checkpoint_test_config()?; From 156fc2b93dae893a5286863a7fa0ec806e25cf89 Mon Sep 17 00:00:00 2001 From: teor Date: Sun, 28 Aug 2022 09:12:45 +1000 Subject: [PATCH 17/42] Update disk usage based on recent data (#4963) --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 7c50fd1a7bc..fe669b66f89 100644 --- a/README.md +++ b/README.md @@ -103,10 +103,10 @@ cargo install --features= ... 
### System Requirements The recommended requirements for compiling and running `zebrad` are: -- 4+ CPU cores -- 16+ GB RAM -- 300 GB+ available disk space for building binaries and storing cached chain state -- 100+ Mbps network connection, with 100+ GB of uploads and downloads per month +- 4 CPU cores +- 16 GB RAM +- 300 GB available disk space for building binaries and storing cached chain state +- 100 Mbps network connection, with 300 GB of uploads and downloads per month We continuously test that our builds and tests pass on: @@ -157,10 +157,11 @@ If this is a problem for you, please [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) `zebrad`'s typical mainnet network usage is: -- Initial sync: 50 GB download, we expect the initial download to grow to hundreds of gigabytes over time -- Ongoing updates: 10 MB - 1 GB upload and download per day, depending on user-created transaction size, and peer requests +- Initial sync: 100 GB download, we expect the initial download to grow to hundreds of gigabytes over time +- Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests -Zebra also performs an initial sync every time its internal database version changes. +Zebra performs an initial sync every time its internal database version changes, +so some version upgrades might require a full download of the whole chain. For more detailed information, refer to the [documentation](https://zebra.zfnd.org/user/run.html). @@ -177,7 +178,7 @@ See our [roadmap](#future-work) for details. Zebra uses around 100 GB of space for cached mainnet data, and 10 GB of space for cached testnet data. We expect disk usage to grow over time, so we recommend reserving at least 300 GB for mainnet nodes. -RocksDB cleans up outdated data periodically, and when the database is closed and re-opened. +Zebra's database cleans up outdated data periodically, and when Zebra is shut down and restarted. #### Disk Troubleshooting From 6fd3cdb3dab7eb3bab715f190039268938e07c04 Mon Sep 17 00:00:00 2001 From: teor Date: Sun, 28 Aug 2022 19:47:42 +1000 Subject: [PATCH 18/42] fix(ci): Expand cached state disks before running tests (#4962) * Expand cached state disks before running tests * Install partition management tool * There isn't actually a partition on the cached state image * Make e2fsck non-interactive * Limit the length of image names to 63 characters * Ignore possibly long branch names when matching images, just match the commit --- .github/workflows/deploy-gcp-tests.yml | 61 +++++++++++++++++++------- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index e8da122c6a2..18d2261ad34 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -148,6 +148,9 @@ jobs: --zone ${{ env.ZONE }} sleep 60 + # Create a docker volume with the new disk we just created. + # + # SSH into the just created VM, and create a docker volume with the newly created disk. 
- name: Create ${{ inputs.test_id }} Docker volume run: | gcloud compute ssh \ @@ -157,7 +160,7 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - sudo mkfs.ext4 /dev/sdb \ + sudo mkfs.ext4 -v /dev/sdb \ && \ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -285,11 +288,16 @@ jobs: DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} fi - # Try to find an image generated from this branch and commit - # Fields are listed in the "Create image from state disk" step - COMMIT_DISK_PREFIX="${DISK_PREFIX}-${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" + # Try to find an image generated from a previous step or run of this commit. + # Fields are listed in the "Create image from state disk" step. + # + # We can't match the full branch name here, + # because it might have been shortened for the image. + # + # The probability of two matching short commit hashes within the same month is very low. + COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT} Disk: $COMMIT_CACHED_DISK_NAME" + echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" fi @@ -351,7 +359,10 @@ jobs: # Create a docker volume with the selected cached state. # - # SSH into the just created VM, and create a docker volume with the recently attached disk. + # SSH into the just created VM, expand the partition and filesystem to fill the entire disk, + # then create a docker volume with the recently attached disk. + # (The cached state and disk are usually the same size, + # but the cached state can be smaller if we just increased the disk size.) - name: Create ${{ inputs.test_id }} Docker volume run: | gcloud compute ssh \ @@ -361,6 +372,10 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ + sudo e2fsck -v -f -p /dev/sdb \ + && \ + sudo resize2fs -p /dev/sdb \ + && \ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ " @@ -1000,14 +1015,22 @@ jobs: with: short-length: 7 + # Performs formatting on disk name components. + # # Disk images in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }} + # uses sentence case, so we need to downcase ${{ inputs.network }}. # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable - - name: Downcase network name for disks + # Disk image names in GCP are limited to 63 characters, so we need to limit + # branch names to 13 characters. + # + # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. + # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. 
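For reference, the two Bash parameter expansions used by the step that follows behave like this standalone sketch (the values are invented):

```sh
NETWORK_CAPS="Mainnet"                       # example value of the network input
echo "${NETWORK_CAPS,,}"                     # ${VAR,,} downcases the value: prints "mainnet"

LONG_GITHUB_REF="feature-very-long-branch"   # hypothetical branch slug
echo "${LONG_GITHUB_REF:0:13}"               # ${VAR:0:13} keeps the first 13 characters: prints "feature-very-"
```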
+ - name: Format network name and branch name for disks run: | NETWORK_CAPS=${{ inputs.network }} echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + LONG_GITHUB_REF=${{ env.GITHUB_REF_SLUG_URL }} + echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:13}" >> $GITHUB_ENV # Setup gcloud CLI - name: Authenticate to Google Cloud id: auth uses: google-github-actions/auth@v0.8.0 with: @@ -1052,7 +1075,7 @@ SYNC_HEIGHT=$(echo $DOCKER_LOGS | grep -oE '${{ inputs.height_grep_text }}\([0-9]+\)' | grep -oE '[0-9]+' | tail -1 || [[ $? == 1 ]]) echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> $GITHUB_ENV - # Sets the $UPDATE_SUFFIX env var to "-update" if using cached state, + # Sets the $UPDATE_SUFFIX env var to "-u" if using cached state, # and the empty string otherwise. # # Also sets a unique date and time suffix $TIME_SUFFIX. @@ -1061,26 +1084,32 @@ UPDATE_SUFFIX="" if [[ "${{ inputs.needs_zebra_state }}" == "true" ]]; then - UPDATE_SUFFIX="-update" + UPDATE_SUFFIX="-u" fi - TIME_SUFFIX=$(date '+%Y-%m-%d-%H-%M-%S' --utc) + # We're going to delete old images after a month, so we don't need the year here + TIME_SUFFIX=$(date '+%m%d%H%M%S' --utc) echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> $GITHUB_ENV echo "TIME_SUFFIX=$TIME_SUFFIX" >> $GITHUB_ENV - # Create an image from disk that will be used for following/other tests + # Create an image from disk that will be used for following/other tests. + # # This image can contain: # - Zebra cached state # - Zebra + lightwalletd cached state - # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }} + # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}. + # + # The image name must be unique, and be 63 characters or less. + # The timestamp makes images from the same commit unique, + # as long as they don't finish in the same second. # # Force the image creation (--force) as the disk is still attached even though is not being - # used by the container + # used by the container. - name: Create image from state disk run: | gcloud compute images create \ - "${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}$UPDATE_SUFFIX-$TIME_SUFFIX" \ + "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ env.ZONE }} \ From 8227dabe56f3964f4624e45005ff2eb924c5a02a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Sun, 28 Aug 2022 08:46:21 -0400 Subject: [PATCH 19/42] ci(build): deploy long running node instances on release (#4939) * feat(build): deploy long running instances on release Previous behavior: Each time we merged to main, new nodes would be deployed. This is expected behavior, as we need to ensure nodes get deployed and run without issues, but it could also replace nodes very hastily. Expected behavior: We want instances which would run for a longer time, to allow us to troubleshoot issues or inspect the behavior of these instances for longer periods of time (2+ weeks) Applied solution: Deploy a versioned managed instance group (MiG) using the major version of the release semver. We just use the first part of the version to replace old instances, and change it when a major version is released to keep a segregation between new and old versions.
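The extraction itself is just a two-character prefix of the release tag; a standalone sketch of what the `versioning` job below computes, with an example tag:

```sh
# Shell equivalent of the github-script `tag_name.substring(0,2)` used by the versioning job.
TAG_NAME="v1.3.0"        # example release tag
echo "${TAG_NAME:0:2}"   # prints "v1"
```

Note that taking the first two characters assumes a single-digit major version: a hypothetical `v10.0.0` tag would also yield `v1`.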
* ci(build): allow v0 as a major version tag * fix(build): use rust conventions for versioning * fix(deploy): improve documentation and trigger on release * Update .github/workflows/continous-delivery.yml Co-authored-by: teor * fix(versioning): typo * fix(deploy): use `zebrad-v1` as the instance name, with no SHA * fix(deploy): create and update MiG must use the same name * docs(deployments): add Continuous Delivery process Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/build-docker-image.yml | 9 ++- .github/workflows/continous-delivery.yml | 71 ++++++++++++++++++++---- book/src/dev/continous-delivery.md | 28 ++++++++++ 3 files changed, 97 insertions(+), 11 deletions(-) create mode 100644 book/src/dev/continous-delivery.md diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 9b337a1a29d..665a3fee61b 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -37,12 +37,19 @@ on: required: false type: string default: info + outputs: + image_digest: + description: 'The image digest to be used on a caller workflow' + value: ${{ jobs.build.outputs.image_digest }} jobs: build: name: Build images timeout-minutes: 210 runs-on: ubuntu-latest + outputs: + image_digest: ${{ steps.docker_build.outputs.digest }} + image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} permissions: contents: 'read' id-token: 'write' @@ -67,12 +74,12 @@ jobs: # generate Docker tags based on the following events/attributes tags: | type=schedule + type=sha type=ref,event=branch type=ref,event=pr type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} - type=sha # Setup Docker Buildx to allow use of docker cache layers from GH - name: Set up Docker Buildx diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 51401205f83..b896d1ce2aa 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -14,6 +14,9 @@ on: push: branches: - main + release: + types: + - published env: NETWORK: Mainnet @@ -23,6 +26,36 @@ env: MACHINE_TYPE: c2-standard-4 jobs: + # If a release was made we want to extract the first part of the semver from the + # tag_name + # + # Generate the following output to pass to subsequent jobs + # - If our semver is `v1.3.0` the resulting output from this job would be `v1` + # + # Note: We just use the first part of the version to replace old instances, and change + # it when a major version is released, to keep a segregation between new and old + # versions. 
+  versioning:
+    name: Versioning
+    runs-on: ubuntu-latest
+    outputs:
+      major_version: ${{ steps.set.outputs.major_version }}
+    steps:
+      - name: Getting Zebrad Version
+        id: get
+        uses: actions/github-script@v6.1.0
+        with:
+          result-encoding: string
+          script: |
+            return context.payload.release.tag_name.substring(0,2)
+      - name: Setting API Version
+        id: set
+        run: echo "::set-output name=major_version::${{ steps.get.outputs.result }}"
+
+  # Each time this workflow is executed, a build will be triggered to create a new image
+  # with the corresponding tags, using information from Git.
+  #
+  # The image will be commonly named `zebrad:`
   build:
     uses: ./.github/workflows/build-docker-image.yml
     with:
@@ -35,15 +68,26 @@ jobs:
       zebra_skip_ipv6_tests: '1'
       rust_log: info

+  # This job handles the deployment of a Managed Instance Group (MiG) with 2 nodes in
+  # the us-central1 region. Two different groups of MiGs are deployed: one for pushes to
+  # the main branch and another for version releases of Zebra.
+  #
+  # Once this workflow is triggered, the previous MiG is replaced. On pushes to main it's
+  # always replaced, and with releases it's only replaced if the same major version is
+  # being deployed; otherwise a new major version is deployed.
+  #
+  # Runs:
+  # - on every push/merge to the `main` branch
+  # - on every release, when it's published
   deploy-nodes:
     name: Deploy Mainnet nodes
-    needs: build
+    needs: [ build, versioning ]
     runs-on: ubuntu-latest
     timeout-minutes: 30
     permissions:
       contents: 'read'
       id-token: 'write'
-    if: ${{ github.event_name == 'push' && github.ref_name == 'main' }}
+    if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' }}

     steps:
       - name: Inject slug/short variables
@@ -63,9 +107,9 @@ jobs:

       - name: Create instance template
         run: |
-          gcloud compute instance-templates create-with-container zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
           --boot-disk-type=pd-ssd \
-          --container-image ${{ env.GAR_BASE }}/${{ env.GITHUB_REF_SLUG_URL }}:${{ env.GITHUB_SHA_SHORT }} \
+          --container-image ${{ env.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \
          --create-disk name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=100GB,type=pd-ssd \
          --container-mount-disk mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \
          --machine-type ${{ env.MACHINE_TYPE }} \
@@ -77,15 +121,15 @@ jobs:
         id: does-group-exist
         continue-on-error: true
         run: |
-          gcloud compute instance-groups list | grep "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" | grep "${{ env.REGION }}"
+          gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" | grep "${{ env.REGION }}"

       # Deploy new managed instance group using the new instance template
       - name: Create managed instance group
         if: steps.does-group-exist.outcome == 'failure'
         run: |
           gcloud compute instance-groups managed create \
-          "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" \
-          --template "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
+          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \
+          --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
           --health-check zebrad-tracing-filter \
           --initial-delay 30 \
           --region "${{ env.REGION }}" \
@@ -96,10 +140,17 @@ jobs:
         if: steps.does-group-exist.outcome == 'success'
         run: |
           gcloud compute instance-groups managed rolling-action start-update \
-          "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" \
-          --version template="zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
+          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \
+          --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
           --region "${{ env.REGION }}"

+  # This job handles the deployment of a single node (1) in the us-central1-a zone,
+  # when an instance is required to test a specific commit.
+  #
+  # Runs:
+  # - on request, using workflow_dispatch with regenerate-disks
+  #
+  # Note: these instances are not automatically replaced or deleted
   deploy-instance:
     name: Deploy single instance
     needs: build
@@ -134,7 +185,7 @@ jobs:
       --boot-disk-type=pd-ssd \
       --container-stdin \
       --container-tty \
-      --container-image ${{ env.GAR_BASE }}/${{ env.GITHUB_REF_SLUG_URL }}:${{ env.GITHUB_SHA_SHORT }} \
+      --container-image ${{ env.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \
       --create-disk name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=100GB,type=pd-ssd \
       --container-mount-disk mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \
       --machine-type ${{ env.MACHINE_TYPE }} \
diff --git a/book/src/dev/continous-delivery.md b/book/src/dev/continous-delivery.md
new file mode 100644
index 00000000000..e3592a145b5
--- /dev/null
+++ b/book/src/dev/continous-delivery.md
@@ -0,0 +1,28 @@
+# Zebra Continuous Delivery
+
+Zebra extends its continuous integration by automatically deploying all
+code changes to a testing and/or pre-production environment after each PR gets merged
+into the `main` branch, and on each Zebra `release`.
+
+## Triggers
+
+The continuous delivery pipeline is triggered when:
+
+* A PR is merged to `main` (technically, a `push` event)
+* A new release is published in GitHub
+
+## Deployments
+
+On each trigger, Zebra is deployed using the branch or version references as part of
+the deployment naming convention. Deployments are made using [Managed Instance Groups (MIGs)](https://cloud.google.com/compute/docs/instance-groups#managed_instance_groups)
+from Google Cloud Platform, with 2 nodes in the us-central1 region.
+
+**Note**: These *MIGs* are always replaced when PRs are merged to the `main` branch and
+when a release is published. If a new major version is released, a new *MIG* is also
+created, keeping the previous major version running until it's no longer needed.
+
+A single instance can also be deployed on demand, when a
+long-lived instance with specific changes needs to be tested on Mainnet with
+the same infrastructure used for CI & CD.
+
+Further details of the actual process can be found in our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-delivery.yml).
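Two details of the workflow above are worth spelling out. First, the
`versioning` job derives its output with `tag_name.substring(0,2)`, which
assumes a `v` prefix and a single-digit major version (`v1.3.0` becomes `v1`,
but `v10.3.0` would also become `v1`). A more robust sketch of the same
extraction in shell, assuming Rust-style semver tags and a hypothetical tag
value:

    # Hypothetical example tag, not a real release:
    TAG_NAME="v10.3.0"
    # Strip everything from the first dot onwards:
    MAJOR_VERSION="${TAG_NAME%%.*}"   # v10
    echo "major_version=${MAJOR_VERSION}"

Second, the `needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL`
expressions make the instance names fall back to the branch slug whenever the
`versioning` output is empty, so the same deploy steps can serve both release
and push events.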
From e97350891168984a7b7b079922424b8c980092a0 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 29 Aug 2022 03:08:27 +1000 Subject: [PATCH 20/42] build(deps): bump chrono from 0.4.19 to 0.4.20 (#4898) * Bump chrono to 0.4.20 * Fix clippy::assign_op_pattern * Update deprecated constant names * Drop old `time` crate dependency from `chrono` Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 7 +- zebra-chain/Cargo.toml | 4 +- .../network_chain_tip_height_estimator.rs | 2 +- zebra-chain/src/serialization/arbitrary.rs | 4 +- zebra-consensus/Cargo.toml | 2 +- zebra-consensus/src/transaction/tests.rs | 77 +++++++++---------- zebra-consensus/src/transaction/tests/prop.rs | 7 +- zebra-network/Cargo.toml | 2 +- zebra-network/src/protocol/external/codec.rs | 18 +++-- zebra-rpc/Cargo.toml | 12 +-- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 12 files changed, 71 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1943aea5a7..ff6668db39b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -740,15 +740,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "6127248204b9aba09a362f6c930ef6a78f2c1b2215f8a7b398c06e1083f17af0" dependencies = [ - "libc", + "js-sys", "num-integer", "num-traits", "serde", "time 0.1.44", + "wasm-bindgen", "winapi", ] diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index df7aa486ab6..32c2997074b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -35,7 +35,7 @@ rand_core = "0.6.3" ripemd = "0.1.1" # Matches version used by hdwallet secp256k1 = { version = "0.21.3", features = ["serde"] } -sha2 = { version = "0.9.9", features=["compress"] } +sha2 = { version = "0.9.9", features = ["compress"] } subtle = "2.4.1" uint = "0.9.1" x25519-dalek = { version = "1.2.0", features = ["serde"] } @@ -49,7 +49,7 @@ zcash_note_encryption = "0.1" zcash_primitives = { version = "0.7.0", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.19", features = ["serde"] } +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting diff --git a/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs b/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs index 3a310524da0..5d4e1f59237 100644 --- a/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs +++ b/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs @@ -93,7 +93,7 @@ impl NetworkChainTipHeightEstimator { let target_spacing_seconds = self.current_target_spacing.num_seconds(); let time_to_activation = Duration::seconds(remaining_blocks * target_spacing_seconds); - self.current_block_time = self.current_block_time + time_to_activation; + self.current_block_time += time_to_activation; self.current_height = max_height; } } diff --git a/zebra-chain/src/serialization/arbitrary.rs b/zebra-chain/src/serialization/arbitrary.rs index 953e8643c16..9c432475891 100644 --- a/zebra-chain/src/serialization/arbitrary.rs +++ b/zebra-chain/src/serialization/arbitrary.rs @@ -2,7 +2,7 @@ use std::convert::TryInto; -use chrono::{TimeZone, Utc, MAX_DATETIME, MIN_DATETIME}; +use chrono::{DateTime, TimeZone, Utc}; use proptest::{arbitrary::any, prelude::*}; use super::{ @@ -41,7 +41,7 @@ impl Arbitrary for DateTime32 { pub fn datetime_full() -> impl 
Strategy> { ( // TODO: should we be subtracting 1 from the maximum timestamp? - MIN_DATETIME.timestamp()..=MAX_DATETIME.timestamp(), + DateTime::::MIN_UTC.timestamp()..=DateTime::::MAX_UTC.timestamp(), 0..2_000_000_000_u32, ) .prop_map(|(secs, nsecs)| Utc.timestamp(secs, nsecs)) diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index a0624f98828..c8e55223f77 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -18,7 +18,7 @@ jubjub = "0.9.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.5.3" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } dirs = "4.0.0" displaydoc = "0.2.3" lazy_static = "1.4.0" diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index bae2d326c78..21f579884cd 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -1,11 +1,9 @@ //! Tests for Zcash transaction consensus checks. -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; +use chrono::{DateTime, Utc}; +use color_eyre::eyre::Report; use halo2::pasta::{group::ff::PrimeField, pallas}; use tower::{service_fn, ServiceExt}; @@ -27,10 +25,9 @@ use zebra_chain::{ transparent::{self, CoinbaseData}, }; -use super::{check, Request, Verifier}; - use crate::error::TransactionError; -use color_eyre::eyre::Report; + +use super::{check, Request, Verifier}; #[cfg(test)] mod prop; @@ -264,7 +261,7 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { height: canopy .activation_height(network) .expect("Canopy activation height is specified"), - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -327,7 +324,7 @@ fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: expiry_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -377,7 +374,7 @@ async fn v4_transaction_with_transparent_transfer_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -416,7 +413,7 @@ async fn v4_transaction_with_last_valid_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -461,7 +458,7 @@ async fn v4_coinbase_transaction_with_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -503,7 +500,7 @@ async fn v4_transaction_with_too_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -548,7 +545,7 @@ async fn v4_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -601,7 +598,7 @@ async fn v4_coinbase_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: 
DateTime::::MAX_UTC, }) .await; @@ -652,7 +649,7 @@ async fn v4_coinbase_transaction_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -702,7 +699,7 @@ async fn v4_transaction_with_transparent_transfer_is_rejected_by_the_script() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -752,7 +749,7 @@ async fn v4_transaction_with_conflicting_transparent_spend_is_rejected() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -818,7 +815,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -889,7 +886,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -943,7 +940,7 @@ async fn v5_transaction_with_transparent_transfer_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -983,7 +980,7 @@ async fn v5_transaction_with_last_valid_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1026,7 +1023,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1047,7 +1044,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1072,7 +1069,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1099,7 +1096,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: new_expiry_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1141,7 +1138,7 @@ async fn v5_transaction_with_too_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1187,7 +1184,7 @@ async fn v5_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1241,7 +1238,7 @@ async fn v5_coinbase_transaction_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), 
height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1293,7 +1290,7 @@ async fn v5_transaction_with_transparent_transfer_is_rejected_by_the_script() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1345,7 +1342,7 @@ async fn v5_transaction_with_conflicting_transparent_spend_is_rejected() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1390,7 +1387,7 @@ fn v4_with_signed_sprout_transfer_is_accepted() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1463,7 +1460,7 @@ async fn v4_with_joinsplit_is_rejected_for_modification( transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1499,7 +1496,7 @@ fn v4_with_sapling_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1542,7 +1539,7 @@ fn v4_with_duplicate_sapling_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1587,7 +1584,7 @@ fn v4_with_sapling_outputs_and_no_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1636,7 +1633,7 @@ fn v5_with_sapling_spends() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1680,7 +1677,7 @@ fn v5_with_duplicate_sapling_spends() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; @@ -1743,7 +1740,7 @@ fn v5_with_duplicate_orchard_action() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::::MAX_UTC, }) .await; diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index e243dd4fa98..aaddb3649d6 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -1,6 +1,6 @@ //! Randomised property tests for transaction verification. -use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use chrono::{DateTime, Duration, Utc}; use proptest::{collection::vec, prelude::*}; @@ -14,9 +14,10 @@ use zebra_chain::{ transparent, }; -use super::mock_transparent_transfer; use crate::{error::TransactionError, transaction}; +use super::mock_transparent_transfer; + /// The maximum number of transparent inputs to include in a mock transaction. const MAX_TRANSPARENT_INPUTS: usize = 10; @@ -204,7 +205,7 @@ proptest! 
{ (first_datetime, second_datetime) } else if first_datetime > second_datetime { (second_datetime, first_datetime) - } else if first_datetime == chrono::MAX_DATETIME { + } else if first_datetime == DateTime::::MAX_UTC { (first_datetime - Duration::nanoseconds(1), first_datetime) } else { (first_datetime, first_datetime + Duration::nanoseconds(1)) diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e40e8015ea3..f9c7930e8ff 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -16,7 +16,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "1.3.2" byteorder = "1.4.3" bytes = "1.2.1" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.1", features = ["serde"] } diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index f007b73c648..1c6de9b8f05 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -2,7 +2,6 @@ use std::{ cmp::min, - convert::TryInto, fmt, io::{Cursor, Read, Write}, }; @@ -724,15 +723,18 @@ impl Codec { } } -// XXX replace these interior unit tests with exterior integration tests + proptest +// TODO: +// - move these unit tests to a separate file +// - add exterior integration tests + proptest #[cfg(test)] mod tests { - use super::*; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use chrono::{MAX_DATETIME, MIN_DATETIME}; + use chrono::DateTime; use futures::prelude::*; use lazy_static::lazy_static; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use super::*; lazy_static! { static ref VERSION_TEST_VECTOR: Message = { @@ -808,8 +810,10 @@ mod tests { deserialize_version_with_time(1620777600).expect("recent time is valid"); deserialize_version_with_time(0).expect("zero time is valid"); - deserialize_version_with_time(MIN_DATETIME.timestamp()).expect("min time is valid"); - deserialize_version_with_time(MAX_DATETIME.timestamp()).expect("max time is valid"); + deserialize_version_with_time(DateTime::::MIN_UTC.timestamp()) + .expect("min time is valid"); + deserialize_version_with_time(DateTime::::MAX_UTC.timestamp()) + .expect("max time is valid"); } /// Deserialize a `Version` message containing `time`, and return the result. 
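The chrono 0.4.20 migration repeated throughout this patch boils down to two
mechanical changes: the deprecated `MIN_DATETIME`/`MAX_DATETIME` constants
become associated constants on `DateTime<Utc>`, and `x = x + d` becomes
`x += d` to satisfy `clippy::assign_op_pattern`. A minimal standalone sketch,
assuming chrono 0.4.20:

    use chrono::{DateTime, Duration, TimeZone, Utc};

    fn main() {
        // chrono 0.4.20 replaces the MIN_DATETIME/MAX_DATETIME constants
        // with associated constants on DateTime<Utc>:
        let min = DateTime::<Utc>::MIN_UTC;
        let max = DateTime::<Utc>::MAX_UTC;

        // `+=` instead of `x = x + d`, as required by clippy's
        // assign_op_pattern lint:
        let mut block_time = Utc.timestamp(0, 0);
        block_time += Duration::seconds(150);

        assert!(min < block_time && block_time < max);
        println!("{block_time}");
    }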
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index d14f2c613eb..6bd3b1ff83a 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -12,12 +12,7 @@ default = [] proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "zebra-state/proptest-impl"] [dependencies] -zebra-chain = { path = "../zebra-chain" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-state = { path = "../zebra-state" } - -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } futures = "0.3.21" # lightwalletd sends JSON-RPC requests over HTTP 1.1 @@ -42,6 +37,11 @@ serde = { version = "1.0.142", features = ["serde_derive"] } proptest = { version = "0.10.1", optional = true } proptest-derive = { version = "0.3.0", optional = true } +zebra-chain = { path = "../zebra-chain" } +zebra-network = { path = "../zebra-network" } +zebra-node-services = { path = "../zebra-node-services" } +zebra-state = { path = "../zebra-state" } + [dev-dependencies] insta = { version = "1.17.1", features = ["redactions"] } proptest = "0.10.1" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 69345ddc391..82df95dfcc0 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -10,7 +10,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-test", "zebra-chain/propt [dependencies] bincode = "1.3.3" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } dirs = "4.0.0" displaydoc = "0.2.3" futures = "0.3.21" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6c979d45c26..ba068e3895f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -77,7 +77,7 @@ zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" gumdrop = "0.7" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } humantime = "2.1.0" humantime-serde = "1.1.1" indexmap = "1.9.1" From 4cda4eef665d71a1e8d34b2aad38a650d0d2737a Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 29 Aug 2022 03:08:43 +1000 Subject: [PATCH 21/42] fix(ci): Improve Zebra acceptance test diagnostics (#4958) * Show the arguments of acceptance test functions in the logs * Show all the logs in the "Run tests" jobs * Document expected "broken pipe" error from `tee` Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/workflows/deploy-gcp-tests.yml | 9 ++++++--- zebrad/tests/acceptance.rs | 11 +++++++++-- zebrad/tests/common/cached_state.rs | 3 +++ zebrad/tests/common/launch.rs | 3 ++- .../common/lightwalletd/send_transaction_test.rs | 3 +++ zebrad/tests/common/lightwalletd/wallet_grpc.rs | 2 ++ zebrad/tests/common/sync.rs | 4 ++++ 7 files changed, 29 insertions(+), 6 deletions(-) diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index 18d2261ad34..2a062cbf709 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -546,6 +546,9 @@ jobs: # following until Sapling activation (or the test finishes). # # The log pipeline ignores the exit status of `docker logs`. + # It also ignores the expected 'broken pipe' error from `tee`, + # which happens when `grep` finds a matching output and moves on to the next job. + # # Errors in the tests are caught by the final test status job. 
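The "broken pipe" behaviour described in the comment above can be reproduced
outside CI. A minimal sketch with a hypothetical container name and log
pattern (GNU `tee`'s `--output-error=exit` makes it stop as soon as `grep`
matches and closes its end of the pipe, instead of silently swallowing the
write error):

    docker logs --follow my-zebrad-container | \
    tee --output-error=exit /dev/stderr | \
    grep --max-count=1 --extended-regexp "sapling activation"
    # `tee` exits with a 'broken pipe' error here once grep has matched;
    # the workflow pipeline deliberately ignores that exit status.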
- name: Show logs for ${{ inputs.test_id }} test (sprout) run: | @@ -919,7 +922,7 @@ jobs: " - # check the results of the test + # check the results of the test, and show all of the test logs test-result: # TODO: update the job name here, and in the branch protection rules name: Run ${{ inputs.test_id }} test @@ -959,7 +962,7 @@ jobs: # Check that the container executed at least 1 Rust test harness test, and that all tests passed. # Then wait for the container to finish, and exit with the test's exit status. - # Also shows recent test logs. + # Also shows all the test logs. # # If the container has already finished, `docker wait` should return its status. # But sometimes this doesn't work, so we use `docker inspect` as a fallback. @@ -977,7 +980,7 @@ jobs: --command=' \ set -e; docker logs \ - --tail ${{ env.EXTRA_LOG_LINES }} \ + --tail all \ ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 94b042f78cd..58bc9f3b1a0 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -354,6 +354,7 @@ fn misconfigured_ephemeral_missing_directory() -> Result<()> { ) } +#[tracing::instrument] fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> { use std::io::ErrorKind; @@ -520,6 +521,7 @@ fn config_test() -> Result<()> { } /// Test that `zebrad start` can parse the output from `zebrad generate`. +#[tracing::instrument] fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> Result<()> { let _init_guard = zebra_test::init(); @@ -823,6 +825,7 @@ fn sync_large_checkpoints_mempool_mainnet() -> Result<()> { .map(|_tempdir| ()) } +#[tracing::instrument] fn create_cached_database(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height(); let checkpoint_stop_regex = format!("{}.*CommitFinalized request", STOP_AT_HEIGHT_REGEX); @@ -839,6 +842,7 @@ fn create_cached_database(network: Network) -> Result<()> { ) } +#[tracing::instrument] fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height() + 1200; let full_validation_stop_regex = @@ -862,6 +866,7 @@ fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { /// `timeout_argument_name` parameter. The value of the environment variable must the number of /// minutes specified as an integer. #[allow(clippy::print_stderr)] +#[tracing::instrument] fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> { let timeout_argument: Option = env::var(timeout_argument_name) .ok() @@ -1284,6 +1289,7 @@ async fn lightwalletd_test_suite() -> Result<()> { /// Set `FullSyncFromGenesis { allow_lightwalletd_cached_state: true }` to speed up manual full sync tests. /// /// The random ports in this test can cause [rare port conflicts.](#Note on port conflict) +#[tracing::instrument] fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> { let _init_guard = zebra_test::init(); @@ -1686,6 +1692,7 @@ fn zebra_state_conflict() -> Result<()> { /// `second_dir`. Check that the first node's stdout contains /// `first_stdout_regex`, and the second node's stderr contains /// `second_stderr_regex`. 
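The `docker wait` fallback described in the test-result job above can be
sketched on its own, with a hypothetical container name (`docker wait` prints
the container's exit code when it works, and `docker inspect` recovers the
same code when `docker wait` misbehaves):

    CONTAINER="my-test-container"
    EXIT_STATUS=$(docker wait "$CONTAINER" || \
                  docker inspect --format='{{.State.ExitCode}}' "$CONTAINER")
    echo "test exit status: $EXIT_STATUS"
    exit "$EXIT_STATUS"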
+#[tracing::instrument] fn check_config_conflict( first_dir: T, first_stdout_regex: &str, @@ -1693,8 +1700,8 @@ fn check_config_conflict( second_stderr_regex: &str, ) -> Result<()> where - T: ZebradTestDirExt, - U: ZebradTestDirExt, + T: ZebradTestDirExt + std::fmt::Debug, + U: ZebradTestDirExt + std::fmt::Debug, { // Start the first node let mut node1 = first_dir.spawn_child(args!["start"])?; diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 8739491d108..3323c3ac716 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -25,6 +25,7 @@ pub type BoxStateService = BoxService; /// Starts a state service using the provided `cache_dir` as the directory with the chain state. +#[tracing::instrument(skip(cache_dir))] pub async fn start_state_service_with_cache_dir( network: Network, cache_dir: impl Into, @@ -47,6 +48,7 @@ pub async fn start_state_service_with_cache_dir( } /// Loads the chain tip height from the state stored in a specified directory. +#[tracing::instrument] pub async fn load_tip_height_from_state_directory( network: Network, state_path: &Path, @@ -87,6 +89,7 @@ pub async fn copy_state_directory(source: impl AsRef) -> Result { /// /// Copies all files from the `directory` into the destination specified by the concatenation of /// the `base_destination_path` and `directory` stripped of its `prefix`. +#[tracing::instrument] async fn copy_directory( directory: &Path, prefix: &Path, diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 3c27a3039df..490bcd8bece 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -206,7 +206,8 @@ where /// /// This prevents it from downloading blocks. Instead, the `zebra_directory` parameter allows /// providing an initial state to the zebrad instance. -pub fn spawn_zebrad_for_rpc_without_initial_peers( +#[tracing::instrument] +pub fn spawn_zebrad_for_rpc_without_initial_peers( network: Network, zebra_directory: P, test_type: LightwalletdTestType, diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index 56639f67141..db638204c67 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -145,6 +145,7 @@ pub async fn run() -> Result<()> { /// /// Returns a list of valid transactions that are not in any of the blocks present in the /// original `zebrad_state_path`. +#[tracing::instrument] async fn load_transactions_from_a_future_block( network: Network, zebrad_state_path: PathBuf, @@ -179,6 +180,7 @@ async fn load_transactions_from_a_future_block( /// /// If the specified `zebrad_state_path` contains a chain state that's not synchronized to a tip that's /// after `height`. +#[tracing::instrument] async fn load_transactions_from_block_after( height: block::Height, network: Network, @@ -213,6 +215,7 @@ async fn load_transactions_from_block_after( /// Performs a request to the provided read-only `state` service to fetch all transactions from a /// block at the specified `height`. 
+#[tracing::instrument(skip(state))] async fn load_transactions_from_block( height: block::Height, state: &mut ReadStateService, diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc.rs b/zebrad/tests/common/lightwalletd/wallet_grpc.rs index 78d575f6c9d..edfed7c5b3e 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc.rs @@ -25,6 +25,7 @@ pub type LightwalletdRpcClient = /// Waits for `lightwalletd` to sync to near the tip, if `wait_for_sync` is true. /// /// Returns the lightwalletd instance and the port number that it is listening for RPC connections. +#[tracing::instrument] pub fn spawn_lightwalletd_with_rpc_server( zebrad_rpc_address: SocketAddr, lightwalletd_state_path: Option, @@ -56,6 +57,7 @@ pub fn spawn_lightwalletd_with_rpc_server( } /// Connect to a lightwalletd RPC instance. +#[tracing::instrument] pub async fn connect_to_lightwalletd(lightwalletd_rpc_port: u16) -> Result { let lightwalletd_rpc_address = format!("http://127.0.0.1:{lightwalletd_rpc_port}"); diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index 494f79ddce8..495ca341497 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -86,6 +86,7 @@ pub const MIN_HEIGHT_FOR_DEFAULT_LOOKAHEAD: Height = Height(3 * sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT as u32); /// What the expected behavior of the mempool is for a test that uses [`sync_until`]. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum MempoolBehavior { /// The mempool should be forced to activate at a certain height, for debug purposes. /// @@ -177,6 +178,7 @@ impl MempoolBehavior { /// On success, returns the associated `TempDir`. Returns an error if /// the child exits or `timeout` elapses before `stop_regex` is found. #[allow(clippy::too_many_arguments)] +#[tracing::instrument(skip(reuse_tempdir))] pub fn sync_until( height: Height, network: Network, @@ -297,6 +299,7 @@ pub fn sync_until( /// The zebrad instance is executed on a copy of the partially synchronized chain state. This copy /// is returned afterwards, containing the fully synchronized chain state. #[allow(dead_code)] +#[tracing::instrument] pub async fn perform_full_sync_starting_from( network: Network, partial_sync_path: &Path, @@ -354,6 +357,7 @@ pub fn cached_mandatory_checkpoint_test_config() -> Result { /// Returns an error if the child exits or the fixed timeout elapses /// before `STOP_AT_HEIGHT_REGEX` is found. 
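The `#[tracing::instrument]` attributes added throughout these helpers are
what make the acceptance test diagnostics readable: each instrumented function
gets a span that records its arguments on entry. A minimal sketch with a
hypothetical `sync_helper` function, assuming the `tracing` and
`tracing-subscriber` crates:

    use tracing::instrument;

    // By default, arguments are recorded in the span (hence the new
    // `std::fmt::Debug` bounds above); `skip(...)` leaves out arguments
    // that are noisy or don't implement `Debug`.
    #[instrument(skip(cache))]
    fn sync_helper(network: &str, height: u32, cache: Vec<u8>) {
        tracing::info!("starting sync");
    }

    fn main() {
        tracing_subscriber::fmt().init();
        // Logs roughly:
        // INFO sync_helper{network="mainnet" height=419200}: starting sync
        sync_helper("mainnet", 419_200, vec![0; 1024]);
    }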
#[allow(clippy::print_stderr)] +#[tracing::instrument] pub fn create_cached_database_height( network: Network, height: Height, From f46d0115e5cec2265adb0e1331e01c330b45d4f4 Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 29 Aug 2022 09:52:19 +1000 Subject: [PATCH 22/42] fix(test): Show full Zebra test panic details in CI logs (#4942) * Handle test failure regexes using Result::Err, rather than panicking * Add output logs to test context, and add tests for that * Let empty test child logs be read again (and produce empty output) * Ignore missing test children when killing with ignore_exited * Fix a clippy lint * Rename `line` to `line_result` for clarity * Revert a redundant context_from() on kill() * Only ignore "no such process" kill() errors in sync_until() tests * Log the command timeout when an acceptance test fails * fix clippy Co-authored-by: Alfredo Garcia Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 1 + zebra-test/Cargo.toml | 2 + zebra-test/src/command.rs | 229 ++++++++++++++++++++++++------------ zebra-test/tests/command.rs | 167 ++++++++++++++++++-------- zebrad/tests/acceptance.rs | 33 +++--- zebrad/tests/common/sync.rs | 6 +- 6 files changed, 301 insertions(+), 137 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ff6668db39b..a38b2f61141 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6544,6 +6544,7 @@ dependencies = [ "color-eyre", "futures", "hex", + "humantime", "indexmap", "insta", "lazy_static", diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 1538a87b410..a68db9466dd 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -26,9 +26,11 @@ color-eyre = "0.6.1" # Enable a feature that makes tinyvec compile much faster. tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } +humantime = "2.1.0" owo-colors = "3.4.0" spandoc = "0.2.2" thiserror = "1.0.32" + tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } tracing-error = "0.2.0" tracing = "0.1.31" diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index 6ccdc59c38e..d56c94716c2 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -3,7 +3,7 @@ use std::{ convert::Infallible as NoDir, fmt::{self, Debug, Write as _}, - io::{BufRead, BufReader, Read, Write as _}, + io::{BufRead, BufReader, ErrorKind, Read, Write as _}, path::Path, process::{Child, Command, ExitStatus, Output, Stdio}, time::{Duration, Instant}, @@ -107,6 +107,7 @@ impl CommandExt for Command { failure_regexes: RegexSet::empty(), ignore_regexes: RegexSet::empty(), deadline: None, + timeout: None, bypass_test_capture: false, }) } @@ -221,59 +222,73 @@ pub struct TestChild { /// Only checked when the command outputs each new line (#1140). pub deadline: Option, + /// The timeout for this command to finish. + /// + /// Only used for debugging output. + pub timeout: Option, + /// If true, write child output directly to standard output, /// bypassing the Rust test harness output capture. bypass_test_capture: bool, } /// Checks command output log `line` from `cmd` against a `failure_regexes` regex set, -/// and panics if any regex matches. The line is skipped if it matches `ignore_regexes`. -/// -/// # Panics +/// and returns an error if any regex matches. The line is skipped if it matches `ignore_regexes`. /// -/// - if any stdout or stderr lines match any failure regex, but do not match any ignore regex +/// Passes through errors from the underlying reader. 
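A companion change below swaps `Iterator::inspect` for `Iterator::map` when
wrapping the child's output lines: `inspect` can only observe each line, while
`map` lets the checker replace an `Ok(line)` with an `Err(...)` that callers
then surface as a test failure instead of a panic. A minimal sketch of that
shape, with a hypothetical "failure" pattern:

    use std::io::{BufRead, BufReader, Error, ErrorKind};

    fn main() {
        let data = "ok line\nfailure: boom\n";
        let reader = BufReader::new(data.as_bytes());

        let mut lines = reader.lines().map(|line_result| {
            let line = line_result?;
            if line.contains("failure") {
                // Turn a logged failure into an io::Error the caller handles:
                return Err(Error::new(
                    ErrorKind::Other,
                    format!("failure logged: {line}"),
                ));
            }
            Ok(line)
        });

        assert!(lines.next().unwrap().is_ok());
        assert!(lines.next().unwrap().is_err());
    }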
pub fn check_failure_regexes( - line: &std::io::Result, + line: std::io::Result, failure_regexes: &RegexSet, ignore_regexes: &RegexSet, cmd: &str, bypass_test_capture: bool, -) { - if let Ok(line) = line { - let ignore_matches = ignore_regexes.matches(line); - let ignore_matches: Vec<&str> = ignore_matches - .iter() - .map(|index| ignore_regexes.patterns()[index].as_str()) - .collect(); - - let failure_matches = failure_regexes.matches(line); - let failure_matches: Vec<&str> = failure_matches - .iter() - .map(|index| failure_regexes.patterns()[index].as_str()) - .collect(); - - if !ignore_matches.is_empty() { - let ignore_matches = ignore_matches.join(","); - - let ignore_msg = if failure_matches.is_empty() { - format!( - "Log matched ignore regexes: {:?}, but no failure regexes", - ignore_matches, - ) - } else { - let failure_matches = failure_matches.join(","); - format!( - "Ignoring failure regexes: {:?}, because log matched ignore regexes: {:?}", - failure_matches, ignore_matches, - ) - }; +) -> std::io::Result { + let line = line?; + + // Check if the line matches any patterns + let ignore_matches = ignore_regexes.matches(&line); + let ignore_matches: Vec<&str> = ignore_matches + .iter() + .map(|index| ignore_regexes.patterns()[index].as_str()) + .collect(); + + let failure_matches = failure_regexes.matches(&line); + let failure_matches: Vec<&str> = failure_matches + .iter() + .map(|index| failure_regexes.patterns()[index].as_str()) + .collect(); + + // If we match an ignore pattern, ignore any failure matches + if !ignore_matches.is_empty() { + let ignore_matches = ignore_matches.join(","); + + let ignore_msg = if failure_matches.is_empty() { + format!( + "Log matched ignore regexes: {:?}, but no failure regexes", + ignore_matches, + ) + } else { + let failure_matches = failure_matches.join(","); + format!( + "Ignoring failure regexes: {:?}, because log matched ignore regexes: {:?}", + failure_matches, ignore_matches, + ) + }; - write_to_test_logs(ignore_msg, bypass_test_capture); - return; - } + write_to_test_logs(ignore_msg, bypass_test_capture); + + return Ok(line); + } + + // If there were no failures, pass the log line through + if failure_matches.is_empty() { + return Ok(line); + } - assert!( - failure_matches.is_empty(), + // Otherwise, if the process logged a failure message, return an error + let error = std::io::Error::new( + ErrorKind::Other, + format!( "test command:\n\ {cmd}\n\n\ Logged a failure message:\n\ @@ -283,8 +298,10 @@ pub fn check_failure_regexes( All Failure regexes: \ {:#?}\n", failure_regexes.patterns(), - ); - } + ), + ); + + Err(error) } /// Write `line` to stdout, so it can be seen in the test logs. @@ -444,7 +461,7 @@ impl TestChild { let bypass_test_capture = self.bypass_test_capture; let reader = BufReader::new(reader); - let lines = BufRead::lines(reader).inspect(move |line| { + let lines = BufRead::lines(reader).map(move |line| { check_failure_regexes( line, &failure_regexes, @@ -459,32 +476,62 @@ impl TestChild { /// Kill the child process. /// + /// If `ignore_exited` is `true`, log "can't kill an exited process" errors, + /// rather than returning them. + /// + /// Returns the result of the kill. + /// /// ## BUGS /// /// On Windows (and possibly macOS), this function can return `Ok` for /// processes that have panicked. See #1781. 
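The ignore-overrides-failure precedence implemented in `check_failure_regexes`
above reduces to a small sketch (hypothetical patterns, using the `regex`
crate's `RegexSet`):

    use std::io::{Error, ErrorKind};
    use regex::RegexSet;

    // Ignore patterns take precedence: a line that matches both sets
    // is passed through instead of becoming an error.
    fn check_line(
        line: String,
        failure: &RegexSet,
        ignore: &RegexSet,
    ) -> std::io::Result<String> {
        if ignore.is_match(&line) {
            return Ok(line);
        }
        if failure.is_match(&line) {
            return Err(Error::new(
                ErrorKind::Other,
                format!("Logged a failure message: {line}"),
            ));
        }
        Ok(line)
    }

    fn main() {
        let failure = RegexSet::new(["panicked", "ERROR"]).unwrap();
        let ignore = RegexSet::new(["expected ERROR"]).unwrap();

        assert!(check_line("an expected ERROR occurred".into(), &failure, &ignore).is_ok());
        assert!(check_line("thread panicked".into(), &failure, &ignore).is_err());
    }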
#[spandoc::spandoc] - pub fn kill(&mut self) -> Result<()> { + pub fn kill(&mut self, ignore_exited: bool) -> Result<()> { let child = match self.child.as_mut() { Some(child) => child, - None => return Err(eyre!("child was already taken")).context_from(self.as_mut()), + None if ignore_exited => { + Self::write_to_test_logs( + "test child was already taken\n\ + ignoring kill because ignore_exited is true", + self.bypass_test_capture, + ); + return Ok(()); + } + None => { + return Err(eyre!( + "test child was already taken\n\ + call kill() once for each child process, or set ignore_exited to true" + )) + .context_from(self.as_mut()) + } }; /// SPANDOC: Killing child process - child.kill().context_from(self.as_mut())?; + let kill_result = child.kill().or_else(|error| { + if ignore_exited && error.kind() == ErrorKind::InvalidInput { + Ok(()) + } else { + Err(error) + } + }); + + kill_result.context_from(self.as_mut())?; Ok(()) } /// Kill the process, and consume all its remaining output. /// + /// If `ignore_exited` is `true`, log "can't kill an exited process" errors, + /// rather than returning them. + /// /// Returns the result of the kill. - pub fn kill_and_consume_output(&mut self) -> Result<()> { + pub fn kill_and_consume_output(&mut self, ignore_exited: bool) -> Result<()> { self.apply_failure_regexes_to_outputs(); // Prevent a hang when consuming output, // by making sure the child's output actually finishes. - let kill_result = self.kill(); + let kill_result = self.kill(ignore_exited); // Read unread child output. // @@ -496,7 +543,7 @@ impl TestChild { if wrote_lines { // Write an empty line, to make output more readable - self.write_to_test_logs(""); + Self::write_to_test_logs("", self.bypass_test_capture); } } @@ -506,7 +553,7 @@ impl TestChild { while self.wait_for_stderr_line(None) {} if wrote_lines { - self.write_to_test_logs(""); + Self::write_to_test_logs("", self.bypass_test_capture); } } @@ -526,12 +573,19 @@ impl TestChild { { self.apply_failure_regexes_to_outputs(); - if let Some(Ok(line)) = self.stdout.as_mut().and_then(|iter| iter.next()) { + if let Some(line_result) = self.stdout.as_mut().and_then(|iter| iter.next()) { + let bypass_test_capture = self.bypass_test_capture; + if let Some(write_context) = write_context.into() { - self.write_to_test_logs(write_context); + Self::write_to_test_logs(write_context, bypass_test_capture); } - self.write_to_test_logs(line); + Self::write_to_test_logs( + line_result + .context_from(self) + .expect("failure reading test process logs"), + bypass_test_capture, + ); return true; } @@ -552,12 +606,19 @@ impl TestChild { { self.apply_failure_regexes_to_outputs(); - if let Some(Ok(line)) = self.stderr.as_mut().and_then(|iter| iter.next()) { + if let Some(line_result) = self.stderr.as_mut().and_then(|iter| iter.next()) { + let bypass_test_capture = self.bypass_test_capture; + if let Some(write_context) = write_context.into() { - self.write_to_test_logs(write_context); + Self::write_to_test_logs(write_context, bypass_test_capture); } - self.write_to_test_logs(line); + Self::write_to_test_logs( + line_result + .context_from(self) + .expect("failure reading test process logs"), + bypass_test_capture, + ); return true; } @@ -583,8 +644,8 @@ impl TestChild { // either in `context_from`, or on drop. 
None => { return Err(eyre!( - "child was already taken.\n\ - wait_with_output can only be called once for each child process", + "test child was already taken\n\ + wait_with_output can only be called once for each child process", )) .context_from(self.as_mut()) } @@ -623,7 +684,9 @@ impl TestChild { /// /// Does not apply to `wait_with_output`. pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = Some(timeout); self.deadline = Some(Instant::now() + timeout); + self } @@ -654,10 +717,15 @@ impl TestChild { match self.expect_line_matching_regex_set(&mut lines, success_regex, "stdout") { Ok(()) => { + // Replace the log lines for the next check self.stdout = Some(lines); Ok(self) } - Err(report) => Err(report), + Err(report) => { + // Read all the log lines for error context + self.stdout = Some(lines); + Err(report).context_from(self) + } } } @@ -681,10 +749,15 @@ impl TestChild { match self.expect_line_matching_regex_set(&mut lines, success_regex, "stderr") { Ok(()) => { + // Replace the log lines for the next check self.stderr = Some(lines); Ok(self) } - Err(report) => Err(report), + Err(report) => { + // Read all the log lines for error context + self.stderr = Some(lines); + Err(report).context_from(self) + } } } @@ -739,6 +812,7 @@ impl TestChild { /// Note: the timeout is only checked after each full line is received from /// the child (#1140). #[instrument(skip(self, lines))] + #[allow(clippy::unwrap_in_result)] pub fn expect_line_matching_regexes( &mut self, lines: &mut L, @@ -762,7 +836,7 @@ impl TestChild { }; // Since we're about to discard this line write it to stdout. - self.write_to_test_logs(&line); + Self::write_to_test_logs(&line, self.bypass_test_capture); if success_regexes.is_match(&line) { return Ok(()); @@ -771,16 +845,20 @@ impl TestChild { if self.is_running() { // If the process exits between is_running and kill, we will see - // spurious errors here. If that happens, ignore "no such process" + // spurious errors here. So we want to ignore "no such process" // errors from kill. - self.kill()?; + self.kill(true)?; } + let timeout = + humantime::format_duration(self.timeout.expect("already checked past_deadline()")); + let report = eyre!( - "{} of command did not contain any matches for the given regex", - stream_name + "{} of command did not log any matches for the given regex,\n\ + within the {:?} command timeout", + stream_name, + timeout, ) - .context_from(self) .with_section(|| format!("{:#?}", success_regexes.patterns()).header("Match Regex:")); Err(report) @@ -794,11 +872,11 @@ impl TestChild { /// May cause weird reordering for stdout / stderr. /// Uses stdout even if the original lines were from stderr. #[allow(clippy::print_stdout)] - fn write_to_test_logs(&self, line: S) + fn write_to_test_logs(line: S, bypass_test_capture: bool) where S: AsRef, { - write_to_test_logs(line, self.bypass_test_capture); + write_to_test_logs(line, bypass_test_capture); } /// Kill `child`, wait for its output, and use that output as the context for @@ -814,7 +892,7 @@ impl TestChild { }; if self.is_running() { - let kill_res = self.kill(); + let kill_res = self.kill(true); if let Err(kill_err) = kill_res { error = error.wrap_err(kill_err); } @@ -872,9 +950,8 @@ impl Drop for TestChild { fn drop(&mut self) { // Clean up child processes when the test finishes, // and check for failure logs. - // - // We don't care about the kill result here. 
- let _ = self.kill_and_consume_output(); + self.kill_and_consume_output(true) + .expect("failure reading test process logs") } } @@ -1198,7 +1275,9 @@ impl ContextFrom<&mut TestChild> for Report { if let Some(stdout) = &mut source.stdout { for line in stdout { - let line = if let Ok(line) = line { line } else { break }; + let line = line.unwrap_or_else(|error| { + format!("failure reading test process logs: {:?}", error) + }); let _ = writeln!(&mut stdout_buf, "{}", line); } } else if let Some(child) = &mut source.child { @@ -1209,7 +1288,9 @@ impl ContextFrom<&mut TestChild> for Report { if let Some(stderr) = &mut source.stderr { for line in stderr { - let line = if let Ok(line) = line { line } else { break }; + let line = line.unwrap_or_else(|error| { + format!("failure reading test process logs: {:?}", error) + }); let _ = writeln!(&mut stderr_buf, "{}", line); } } else if let Some(child) = &mut source.child { diff --git a/zebra-test/tests/command.rs b/zebra-test/tests/command.rs index 0fac169f0e0..449058bc1cb 100644 --- a/zebra-test/tests/command.rs +++ b/zebra-test/tests/command.rs @@ -185,19 +185,15 @@ fn kill_on_timeout_no_output() -> Result<()> { } /// Make sure failure regexes detect when a child process prints a failure message to stdout, -/// and panic with a test failure message. +/// and fail with a test failure message. #[test] -#[should_panic(expected = "Logged a failure message")] fn failure_regex_matches_stdout_failure_message() { let _init_guard = zebra_test::init(); const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let mut child = tempdir() @@ -209,15 +205,21 @@ fn failure_regex_matches_stdout_failure_message() { // Any method that reads output should work here. // We use a non-matching regex, to trigger the failure panic. - child + let expected_error = child .expect_stdout_line_matches("this regex should not match") .unwrap_err(); + + let expected_error = format!("{:?}", expected_error); + assert!( + expected_error.contains("Logged a failure message"), + "error did not contain expected failure message: {}", + expected_error, + ); } /// Make sure failure regexes detect when a child process prints a failure message to stderr, /// and panic with a test failure message. #[test] -#[should_panic(expected = "Logged a failure message")] fn failure_regex_matches_stderr_failure_message() { let _init_guard = zebra_test::init(); @@ -230,10 +232,7 @@ fn failure_regex_matches_stderr_failure_message() { const TEST_CMD: &str = "bash"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &["-c", "read -t 1 -p failure_message"]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let mut child = tempdir() @@ -245,9 +244,16 @@ fn failure_regex_matches_stderr_failure_message() { // Any method that reads output should work here. // We use a non-matching regex, to trigger the failure panic. 
- child + let expected_error = child .expect_stderr_line_matches("this regex should not match") .unwrap_err(); + + let expected_error = format!("{:?}", expected_error); + assert!( + expected_error.contains("Logged a failure message"), + "error did not contain expected failure message: {}", + expected_error, + ); } /// Make sure failure regexes detect when a child process prints a failure message to stdout, @@ -260,10 +266,7 @@ fn failure_regex_matches_stdout_failure_message_drop() { const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let _child = tempdir() @@ -279,6 +282,84 @@ fn failure_regex_matches_stdout_failure_message_drop() { // Drop should read all unread output. } +/// When checking output, make sure failure regexes detect when a child process +/// prints a failure message to stdout, then they fail the test, +/// and read any extra multi-line output from the child process. +#[test] +fn failure_regex_reads_multi_line_output_on_expect_line() { + let _init_guard = zebra_test::init(); + + const TEST_CMD: &str = "echo"; + // Skip the test if the test system does not have the command + if !is_command_available(TEST_CMD, &[]) { + return; + } + + let mut child = tempdir() + .unwrap() + .spawn_child_with_command( + TEST_CMD, + args![ + "failure_message\n\ + multi-line failure message" + ], + ) + .unwrap() + .with_timeout(Duration::from_secs(5)) + .with_failure_regex_set("failure_message", RegexSet::empty()); + + // Any method that reads output should work here. + // We use a non-matching regex, to trigger the failure panic. + let expected_error = child + .expect_stdout_line_matches("this regex should not match") + .unwrap_err(); + + let expected_error = format!("{:?}", expected_error); + assert!( + expected_error.contains( + "\ +Unread Stdout: + multi-line failure message\ + " + ), + "error did not contain expected failure message: {}", + expected_error, + ); +} + +/// On drop, make sure failure regexes detect when a child process prints a failure message. +/// then they fail the test, and read any extra multi-line output from the child process. +#[test] +#[should_panic(expected = "Unread Stdout: + multi-line failure message")] +fn failure_regex_reads_multi_line_output_on_drop() { + let _init_guard = zebra_test::init(); + + const TEST_CMD: &str = "echo"; + // Skip the test if the test system does not have the command + if !is_command_available(TEST_CMD, &[]) { + return; + } + + let _child = tempdir() + .unwrap() + .spawn_child_with_command( + TEST_CMD, + args![ + "failure_message\n\ + multi-line failure message" + ], + ) + .unwrap() + .with_timeout(Duration::from_secs(5)) + .with_failure_regex_set("failure_message", RegexSet::empty()); + + // Give the child process enough time to print its output. + std::thread::sleep(Duration::from_secs(1)); + + // Drop should read all unread output. +} + /// Make sure failure regexes detect when a child process prints a failure message to stdout, /// then the child process is killed. 
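The conversion pattern used in these tests: instead of `#[should_panic]`,
which can't distinguish a real failure from a skipped test, the test captures
the error and asserts on its message, and a missing helper command becomes a
plain `return` (a silent skip). As a standalone sketch with a hypothetical
fallible helper:

    fn fallible_helper() -> Result<(), String> {
        Err("Logged a failure message: oops".to_string())
    }

    #[test]
    fn error_contains_failure_message() {
        let expected_error = fallible_helper().unwrap_err();
        let expected_error = format!("{:?}", expected_error);
        assert!(
            expected_error.contains("Logged a failure message"),
            "error did not contain expected failure message: {}",
            expected_error,
        );
    }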
#[test] @@ -289,10 +370,7 @@ fn failure_regex_matches_stdout_failure_message_kill() { const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let mut child = tempdir() @@ -307,7 +385,7 @@ fn failure_regex_matches_stdout_failure_message_kill() { // Kill should read all unread output to generate the error context, // or the output should be read on drop. - child.kill().unwrap(); + child.kill(true).unwrap(); } /// Make sure failure regexes detect when a child process prints a failure message to stdout, @@ -320,10 +398,7 @@ fn failure_regex_matches_stdout_failure_message_kill_on_error() { const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let child = tempdir() @@ -352,10 +427,7 @@ fn failure_regex_matches_stdout_failure_message_no_kill_on_error() { const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let child = tempdir() @@ -379,7 +451,6 @@ fn failure_regex_matches_stdout_failure_message_no_kill_on_error() { /// /// TODO: test the failure regex on timeouts with no output (#1140) #[test] -#[should_panic(expected = "Logged a failure message")] fn failure_regex_timeout_continuous_output() { let _init_guard = zebra_test::init(); @@ -389,10 +460,7 @@ fn failure_regex_timeout_continuous_output() { const TEST_CMD: &str = "hexdump"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &["/dev/null"]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } // Without '-v', hexdump hides duplicate lines. But we want duplicate lines @@ -406,9 +474,16 @@ fn failure_regex_timeout_continuous_output() { // We need to use expect_stdout_line_matches, because wait_with_output ignores timeouts. // We use a non-matching regex, to trigger the timeout and the failure panic. - child + let expected_error = child .expect_stdout_line_matches("this regex should not match") .unwrap_err(); + + let expected_error = format!("{:?}", expected_error); + assert!( + expected_error.contains("Logged a failure message"), + "error did not contain expected failure message: {}", + expected_error, + ); } /// Make sure failure regexes are checked when a child process prints a failure message to stdout, @@ -423,10 +498,7 @@ fn failure_regex_matches_stdout_failure_message_wait_for_output() { const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let child = tempdir() @@ -447,17 +519,13 @@ fn failure_regex_matches_stdout_failure_message_wait_for_output() { /// Make sure failure regex iters detect when a child process prints a failure message to stdout, /// and panic with a test failure message. 
#[test] -#[should_panic(expected = "Logged a failure message")] fn failure_regex_iter_matches_stdout_failure_message() { let _init_guard = zebra_test::init(); const TEST_CMD: &str = "echo"; // Skip the test if the test system does not have the command if !is_command_available(TEST_CMD, &[]) { - panic!( - "skipping test: command not available\n\ - fake panic message: Logged a failure message" - ); + return; } let mut child = tempdir() @@ -472,9 +540,16 @@ fn failure_regex_iter_matches_stdout_failure_message() { // Any method that reads output should work here. // We use a non-matching regex, to trigger the failure panic. - child + let expected_error = child .expect_stdout_line_matches("this regex should not match") .unwrap_err(); + + let expected_error = format!("{:?}", expected_error); + assert!( + expected_error.contains("Logged a failure message"), + "error did not contain expected failure message: {}", + expected_error, + ); } /// Make sure ignore regexes override failure regexes. diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 58bc9f3b1a0..13718fd00d7 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -258,7 +258,7 @@ fn start_no_args() -> Result<()> { // Run the program and kill it after a few seconds std::thread::sleep(LAUNCH_DELAY); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -285,7 +285,7 @@ fn start_args() -> Result<()> { let mut child = testdir.spawn_child(args!["start"])?; // Run the program and kill it after a few seconds std::thread::sleep(LAUNCH_DELAY); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; // Make sure the command was killed @@ -312,7 +312,7 @@ fn persistent_mode() -> Result<()> { // Run the program and kill it after a few seconds std::thread::sleep(LAUNCH_DELAY); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; // Make sure the command was killed @@ -380,7 +380,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) .spawn_child(args!["start"])?; // Run the program and kill it after a few seconds std::thread::sleep(LAUNCH_DELAY); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; // Make sure the command was killed @@ -547,7 +547,7 @@ fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> R // Run command using temp dir and kill it after a few seconds let mut child = testdir.spawn_child(args![command])?; std::thread::sleep(LAUNCH_DELAY); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -629,8 +629,11 @@ fn invalid_generated_config() -> Result<()> { // and terminate. std::thread::sleep(Duration::from_secs(2)); if child.is_running() { - child.kill()?; - return Err(eyre!("Zebra should not be running anymore.")); + // We're going to error anyway, so return an error that makes sense to the developer. 
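+        // (Assumption, based on the kill cleanup comments in sync.rs below:
+        // the `true` argument makes `kill` ignore "already exited" errors.)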
+ child.kill(true)?; + return Err(eyre!( + "Zebra should have exited after reading the invalid config" + )); } let output = child.wait_with_output()?; @@ -654,7 +657,7 @@ fn stored_config_works() -> Result<()> { child.expect_stdout_line_matches("Starting zebrad".to_string())?; // finish - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -1007,7 +1010,7 @@ async fn metrics_endpoint() -> Result<()> { assert!(res.status().is_success()); let body = hyper::body::to_bytes(res).await; let (body, mut child) = child.kill_on_error(body)?; - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -1082,7 +1085,7 @@ async fn tracing_endpoint() -> Result<()> { let tracing_body = hyper::body::to_bytes(tracing_res).await; let (tracing_body, mut child) = child.kill_on_error(tracing_body)?; - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -1180,7 +1183,7 @@ async fn rpc_endpoint() -> Result<()> { let subversion = parsed["result"]["subversion"].as_str().unwrap(); assert!(subversion.contains("Zebra"), "Got {}", subversion); - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; @@ -1503,10 +1506,10 @@ fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> // // zcash/lightwalletd exits by itself, but // adityapk00/lightwalletd keeps on going, so it gets killed by the test harness. - zebrad.kill()?; + zebrad.kill(false)?; if let Some(mut lightwalletd) = lightwalletd { - lightwalletd.kill()?; + lightwalletd.kill(false)?; let lightwalletd_output = lightwalletd.wait_with_output()?.assert_failure()?; @@ -1719,7 +1722,7 @@ where // Wait a few seconds and kill first node. // Second node is terminated by panic, no need to kill. std::thread::sleep(LAUNCH_DELAY); - let node1_kill_res = node1.kill(); + let node1_kill_res = node1.kill(false); let (_, mut node2) = node2.kill_on_error(node1_kill_res)?; // node2 should have panicked due to a conflict. Kill it here anyway, so it @@ -1872,7 +1875,7 @@ async fn delete_old_databases() -> Result<()> { assert!(outside_dir.as_path().exists()); // finish - child.kill()?; + child.kill(false)?; let output = child.wait_with_output()?; let output = output.assert_failure()?; diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index 495ca341497..743a830eb92 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -250,7 +250,7 @@ pub fn sync_until( // make sure the child process is dead // if it has already exited, ignore that error - let _ = child.kill(); + child.kill(true)?; Ok(child.dir.take().expect("dir was not already taken")) } else { @@ -393,7 +393,9 @@ pub fn create_cached_database_height( child.expect_stdout_line_matches(stop_regex)?; - child.kill()?; + // make sure the child process is dead + // if it has already exited, ignore that error + child.kill(true)?; Ok(()) } From 58c4a62d2f93b9d9e4e1c40ceacef166be385b4d Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Sun, 28 Aug 2022 19:56:58 -0400 Subject: [PATCH 23/42] ci(deploy): do not run `versioning` job when pushing to `main` (#4970) Previous behavior: When a push was detected in the `main` branch, the workflow would run the `versioning` job and crash trying to detect the version being deployed as there was none. 
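For illustration, a minimal sketch of the gating this change introduces
(assumptions: job names are abbreviated, and the `needs` relationship
between `deploy-nodes` and `versioning` is implied by the skip-handling
condition; the exact expressions are in the diff below):

    jobs:
      versioning:
        # only run when a release is published, so there is always a version to detect
        if: ${{ github.event_name == 'release' }}

      deploy-nodes:
        needs: [ versioning ]
        # run even when `versioning` is skipped, but not when it failed or was cancelled
        if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }}
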
Expected behavior:
Do not fail the `versioning` job when pushing to `main`

Solution:
Limit the `versioning` job to only run when a release event is
triggered and allow the `deploy-nodes` job to run even if `versioning`
is skipped
---
 .github/workflows/continous-delivery.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml
index b896d1ce2aa..1d34417664d 100644
--- a/.github/workflows/continous-delivery.yml
+++ b/.github/workflows/continous-delivery.yml
@@ -40,6 +40,7 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       major_version: ${{ steps.set.outputs.major_version }}
+    if: ${{ github.event_name == 'release' }}
     steps:
       - name: Getting Zebrad Version
         id: get
@@ -87,7 +88,7 @@ jobs:
     permissions:
       contents: 'read'
       id-token: 'write'
-    if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' }}
+    if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }}
 
     steps:
       - name: Inject slug/short variables

From 09420d57bea86a02acb9dd6f50be1e993f8b0d4f Mon Sep 17 00:00:00 2001
From: teor
Date: Mon, 29 Aug 2022 12:45:13 +1000
Subject: [PATCH 24/42] fix(ci): Update mainnet and testnet checkpoints
 (#4972)

* Update mainnet and testnet checkpoints

* Add instructions for resolving CI sync timeouts

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 book/src/dev/continuous-integration.md        |  42 ++-
 .../src/checkpoint/main-checkpoints.txt       | 340 ++++++++++++++++++
 .../src/checkpoint/test-checkpoints.txt       | 108 ++++++
 3 files changed, 483 insertions(+), 7 deletions(-)

diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md
index 16089ff9da4..f422efdf90e 100644
--- a/book/src/dev/continuous-integration.md
+++ b/book/src/dev/continuous-integration.md
@@ -1,5 +1,7 @@
 # Zebra Continuous Integration
 
+## Overview
+
 Zebra has extensive continuous integration tests for node syncing and `lightwalletd` integration.
 
 On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml):
@@ -10,17 +12,43 @@ On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoun
 
 When a PR is merged to the `main` branch, we also run a Zebra full sync test from genesis.
 
-Some Docker tests are stateful, they can depend on:
+Currently, each Zebra and lightwalletd full and update sync updates cached state images,
+which are shared by all tests. Tests prefer the latest image generated from the same commit.
+But if a state from the same commit is not available, tests will use the latest image from
+any branch and commit, as long as the state version is the same.
+
+Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-os.yml) on tier 2 platforms using GitHub actions runners.
+
+
+## Troubleshooting
+
+To improve CI performance, some Docker tests are stateful.
+ +Tests can depend on: - built Zebra and `lightwalletd` docker images - cached state images in Google cloud - jobs that launch Google Cloud instances for each test - multiple jobs that follow the logs from Google Cloud (to work around the 6 hour GitHub actions limit) - a final "Run" job that checks the exit status of the Rust acceptance test +- the current height and user-submitted transactions on the blockchain, which changes every minute To support this test state, some Docker tests depend on other tests finishing first. - -Currently, each Zebra and lightwalletd sync updates the cached images, which are shared by all tests. -Tests prefer the latest image generated from the same branch and commit. But if they are not available, they will use the latest image from any branch and commit, as long as the state version is the same. - -Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-os.yml) on tier 2 platforms using GitHub actions runners. - +This means that the entire workflow must be re-run when a single test fails. + +### Resolving CI Sync Timeouts + +CI sync jobs near the tip will take different amounts of time as: +- the blockchain grows, and +- Zebra's checkpoints are updated. + +To resolve a CI sync timeout: +1. Check for recent PRs that could have caused a performance decrease +2. [Update Zebra's checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints) +3. Wait for a full or update sync to finish with the new checkpoints +4. The GitHub actions job limit is 6 hours, so the ideal job time is 4-5 hours. + If any GitHub actions job times out, or takes over 5 hours: + a. [Split the job based on the sync height](https://github.com/ZcashFoundation/zebra/pull/4961/files#diff-4c3718f100312ddc9472f5d4ab2ee0a50a46f2af21352a25fca849734e3f7514R732), or + b. Adjust the sync heights in existing jobs. +5. If a Rust test fails with "command did not log any matches for the given regex, within the ... timeout": + a. If it's the full sync test, [increase the full sync timeout](https://github.com/ZcashFoundation/zebra/commit/9fb87425b76ba3747985ea2f22043ff0276a03bd#diff-8fbc73b0a92a4f48656ffe7d85d55c612c755202dcb7284d8f6742a38a6e9614R367) + b. 
If it's an update sync test, [increase the update sync timeouts](https://github.com/ZcashFoundation/zebra/commit/9fb87425b76ba3747985ea2f22043ff0276a03bd#diff-92f93c26e696014d82c3dc1dbf385c669aa61aa292f44848f52167ab747cb6f6R51) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 7da6a804a8d..b5f43c448c3 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -5627,3 +5627,343 @@ 1777371 0000000001e3b44d75ac5e2d1a1a165e858a61bf5271caa797d2d1b3c6c96fb3 1777421 0000000001c25c23ef85571fe0f78c818c0440db6143af5bb5b3c9824581ad66 1777478 00000000020631eda70ad4f5e580a7ee0fe47f1cc88ed500113d677b728e71b8 +1777560 000000000117ceb3ea9a46f6b74975555bd951b9cb7fd228772bbd16ddc9a54a +1777713 0000000000b576e96c99d91ec4c7f34d02a72af670877d9347e1c4d7d94b6081 +1777816 0000000000cab146500264ffe909a2c3d694177a152e103484da51224b3c81ba +1777857 00000000009c7e305f1d8a1859e6854386e68f3b881fd5383c159e994bccb839 +1777923 000000000079e5ba3598442e1bcf639ce6555570b14b5fac1e95eb4cefd9e4fe +1777969 0000000001a9d4e9513497a0df58ce1aef475ca9070a87912b04097037b105ab +1778020 000000000087d2a403e4518afa756f0061f4d6e113e35a29c5fd70f30facc8e3 +1778085 0000000001002b223dca3206ee8b8afd2180588a6aa241970880c7d012381814 +1778130 00000000016959e2229562cceacf0986746d3585f10fce341c8e146a19f37d2c +1778190 00000000017c8cdb701dec422d26854cefaa550ceb15698490233c2cb3877546 +1778243 0000000000049131efee718be7a9a980e437578891a06eb496a25508d3915530 +1778300 0000000000f725cca1f18b273f30ae86ae388374fe378887a57345f67c0f8438 +1778354 0000000000dc4be4487736225ef7f7a0c9b27eb9762113202d39c9bff7453e04 +1778409 000000000118918f7cfa771cd9a62c8a3448652a8ca22fcf89859d47392f2a9d +1778447 0000000001e79d6701a15a5ca6e2b111ae5f6f1784629a980e36ac6b9bd233f8 +1778512 000000000061e735423476c3afd0ece3822436fa65f04061fbc49ea1de1054df +1778566 00000000015d7613ee542283aafb56a9efd5ecd09d2e7c2318f94efae50a5f2a +1778616 00000000006131e27efcb2e84b8e87160f110b619a3623337d3d70fefc1d3260 +1778668 0000000001a1b5ae41e7b3d68442584a0311b6645ba81150df53ac7cd0073d8c +1778726 0000000000551b9d8c9ad07587d2ee64efc8e00d6f22cf046eeae2b76391c618 +1778785 0000000000f9c112855fbf6dcc6c0d4e47ba63f0490dced6dc834fb3727fc18f +1778838 0000000000f43e2796790231a0bb249ee958b9a87eff05bf430b42fb95bbeebf +1778878 0000000000373ef7b9e4b19f17fd95eb62421f0294cb67df8e12b9e124ad8cb8 +1778938 0000000001af1ecdf6ff592c970809aeadb87c5a44b2939c817bdafc5c3915ab +1778986 00000000010d96c5b483713c27e1ef0bb589b30118668a7973ddb977955bfd1c +1779039 00000000002deb4820dfb7e5308ab1597f6f2e20ea2ad98324b4d1d24951524a +1779091 0000000000e9cada5b21ad43e5cb0b6cd2baed89ebda1ea2de61e50e2e20d244 +1779144 0000000000f3ba5cd1a5ff96cb8f5d529e3721c5fbb7440d5befaa032eac1bab +1779198 00000000014a9e493676d04de8e03555e14d50f004ca4afacc718d4f2041e204 +1779244 0000000001141859c49b7c726a140cc2728ba1da1604a32bfd83b333b8d6cf69 +1779276 0000000000f86117d80d84efdf87eb05c5721da8ad262348c6e022d1483913cb +1779348 000000000059b7a6ee3b5a1fe850fb28b706ef6692dcb87b2be96198bc0deba3 +1779470 000000000084ab0bb6bead7f4fd8459ecb66ec6adc87dff1a3e79259e9bc2386 +1779626 00000000013900ce35496f87d4cfeb90995e9de2a734c9664326ab52bf762157 +1779658 0000000000ed6d2918b8f1590819088e282ce89dcda23ed6016d3870dabf0ac9 +1779689 0000000000f14b6bd51bc6b5951be45bc57e5cb68e15d2e99c267aee2a79f3f5 +1779722 00000000000cd20aecc8cc2f7899cef1ef42c4aecac4ba3f8448a404e986bebc +1779747 
000000000131a35c065c6890f96aac8772e6f62170d2b531c6535183816c4f10 +1779780 00000000013cc2441222364ba82fff0e7e21c05fe1fc7757c076cbf31919d43c +1779806 00000000007fbc67e660d95cae324c5f61cf9a14b4d9609f7388fabbf17c812c +1779838 00000000001181b6275ed44c1eb3993271acad033777b4beb382b54028f37daa +1779868 0000000001343dfc1042ac00fcfd930f37942b397474eb3b7159a394c9743af4 +1779897 000000000101e66a72c12370ab63290ba9e124da2bf3e1fa5e5a28be37b93b44 +1779931 00000000005ff1a76dfe9a9a0a30f91f2bd58e25dfbb838b921d3e049de5e6e9 +1779966 00000000008fea5eb50321cae50292187860590080def1330d332640c555ef9c +1779996 00000000009cef7b77c8d375eb8d3c89cabfed987922827f7638937898940c36 +1780026 0000000000e63e6d76f66a7d2e9f4ee2e45b524fe7cc5d00e275a70bf0e72bb7 +1780063 0000000000ab3a544a3235260994398169b8684908b1434501ee76cfc08ed381 +1780097 0000000001ad211a48f62c6884b9ee117d589b31b593132d70c34de949e98c7d +1780133 00000000008f8427a660fb4332efe38007c015204bdfccb86363c19a035fb4c6 +1780165 0000000000cb0986de0308e91906e11b9c2b1600f172a2903bdd0979b29dd529 +1780197 000000000080d3919424b0b6cdadb4227c8c1ae98d31f8de2679a14c8f8f4195 +1780220 0000000000215b806951b289f5348a9b9890b3625e55b4129030b5d9b3400254 +1780250 0000000000668aece05c24fbb7287ef0c2e6ece347a73fe4f08542d87184591f +1780285 00000000017dd9de47a767966bd5786e85b5b6855308d46f181197b74cbf3557 +1780315 00000000014065326206fa0d6455705c25f71b15392285e2b6ee7ec1cff52fc9 +1780351 000000000134bc7d2bff231204c94f6a9164ee79830c47fd66549f6e83c2b052 +1780381 0000000000ce2e70dd47a3c7cf84422cbad8c154c6ab65df6d83f5685bd2d9ef +1780420 00000000016b985a7264c940213b9240f3dfb35bf322c34b6ecf3f0d0af5d5e4 +1780461 0000000000df72a68524d6a0d1ac80a97eddf40b7d24d31c6b659ced1f24450d +1780495 000000000176ce381cf02319d245f3f1ad592a4c1f6b6a4d84a653b2407e969d +1780533 0000000000746e59f535728e76d1e86f80e7e2d4d15e193c071dd0c976956a82 +1780561 000000000186a6e16968a8603daa87fe56d84b55f4a5a5762bcc0ca0caf1cb02 +1780604 00000000007aa504ec464dabb6a921bad647dd5b1cf638136a7ca0d73b106d8d +1780645 00000000001a59fc3b6e76ef33cd6f4f751ce8ed59a50ec94b4a14aa6e8da6d9 +1780689 00000000008f0ba447e763b5640e29726dc70864ba104b0af74f9adfbc56a2a6 +1780731 0000000000b67426ae514c7f82801facd227950d4699bf0e623ce8a691c14870 +1780771 0000000000c0b5dcfe1979eb76de8ea99b41c05e97f8e6c1f97ef96793ea7136 +1780798 0000000001966c950f01116479d2a918036657e1c3d09f643c5b5885d7ee138f +1780833 000000000093df1ed22f581ccdc461a762fd4bfff6a806092936f8db82c845c1 +1780863 00000000008d46a994990a4169d6dcc98f00993e2d69a1b4ab2052f2b51b0ef9 +1780898 00000000000df7626b9d1e453d9678bcd1227fbbf49c25bc7e082bb22ba991ba +1780944 000000000086cf6fe43094d2062fd2c919be0978e7a7868420e329fa262ea162 +1780981 0000000000368dbdaa42e0ab276dcd2edc6c773ea3e68b9585a8e08f34c5ffdb +1781014 0000000000d976b01b1ef00aaa2ee470b3a67bb6ba960825f9c314d591fbffc0 +1781047 000000000007f46a4c54b2b6dac8bb04d36f7325e8e148b92d9d3c507f5ddddd +1781074 0000000000cdd5c346b72b1ab37058313b43f73600ce36a96b4b54f3d76c7cb4 +1781100 000000000070f879782b89f6d11bebc3191a5624825a6d3a8cc810ff27bac8b3 +1781132 000000000001c8dd15311587a84667f8f99cc52897cfaf07f430ee934df5e188 +1781159 0000000000dcb29f1a0fe1ae1578b16fad08820247a725608b790b25dfa1256a +1781192 00000000013fa6814eab856e1f2c4a8bdb945d19b9d7290231ff574ff2c7be27 +1781221 00000000014959df844475e5c77134b863236c04eefbf17f5b697a250c38bb63 +1781264 00000000018903a91f314747dc0b0892948c2066cd71e7f755e3fae19febe0c0 +1781295 000000000058d9491596d90625b71c7132d37d7c719472662e84d5ae98ff2028 +1781330 0000000000bf0b9073a17b260c30473f0005c29468080468309be9937be17148 +1781374 
00000000005466f16605246b63d76f6ea4d7bee8a917768a0c779687b890496e +1781409 0000000000665240c875829ca962cb29253a1fc2e6f7d5bcab5c53f17d8d8b00 +1781437 00000000005b42e6a046a6b9e5b3602b76292b8853a715ba270b44d6d9c80534 +1781474 0000000001499e56d453627b46551c66b7279ab1bc2cd0b36e43f35f75e748e5 +1781505 000000000097fe03070c2e7e75d4d4d08fac1abdad4ab75de2bf623bd5ea2aa7 +1781536 00000000001f8f7739b079eec0241cec4e1da8f02812ce6fa6d705406f6a6276 +1781561 0000000001198bdf0b2c9b895694a9e6f32d1ba4b3b18eae2a69447a04b11c3c +1781597 00000000008cce14b8c67ea2ad56f66fdf6ad9c9c23bbaaf5470fbae84fcf222 +1781634 00000000008509651ea8d4cfebc611c8968c8c6fd764075b83311106ffe6b7ea +1781662 00000000001ce7bb45d8f1c73ab5cb263c3cefc1bfc8bd6b8b78dd2f94a12213 +1781695 000000000099ab9d75fb3020e6ea491f78734dd924cc3b7a503d1e16338cb324 +1781729 0000000000d8fa9d6c25d861f31ff130e756289a5c83b970dba4c58a685262f5 +1781763 0000000000b2ebaa8f46b688ebff4a427fe0b38afd8e65abf1e32ec6e0310cc7 +1781797 00000000014aa2e0ac63e142d98143058a26671b153da6e674e9c7987671f219 +1781822 0000000001b2b24d09a48c4087d8d7733a465efd79917849babf44d1723026a9 +1781853 00000000003fd8076e706a6d559c54cdb5bef65b8523be15fd590a8b0739d14e +1781882 00000000005689434b0d4715c0253114e38892e3c82730628acc8f2437e96493 +1781914 0000000000f75a5b6d6125084b5c98a04fc08b964fb554ead7b61d481c4e6690 +1781946 0000000000008ed96a45cacb5f87132fae56416480707240fd02588aaf0839a3 +1781983 00000000017a6aacc78bbb09846ae4867617e2040da55df27f256383aea77f38 +1782016 00000000004281e4c3a2f9a3e8285697c07c0bcccd65013d02414d0accdd2c1e +1782044 0000000000d8bb05546fed082f0c116df3e9933de6406afcfe586bb4fe239e75 +1782070 00000000015f3dc90bc93a152d087c5951edfb39a186efb0199a940e273a9f60 +1782100 00000000010c534a71449028b96393037be0ae48ec1f37cca6d682c9ebb34960 +1782137 0000000000b4573956bb86c0535753b99c7bf73aadeca01cfc1109992e2c238b +1782163 0000000000991fcd1bc8b19aa30dfe7dc4a7eae9616027a5c97e2f51b912272c +1782194 0000000001648493956ad572602796da5ca42c866bbf461170410f163b2b1bf7 +1782232 00000000013afab84582e31c1d2f443aa4bdcfaa5e5eeef1011baa8f97ef382e +1782271 00000000005e1dc3ac4ca793c00000d0c41d1af764e947ccb70e54555ea1bcda +1782304 0000000001526b899643c4829d5d62b43b2127f26a2b4e6c057fb11cdfdfa90b +1782336 000000000000acb38dcfca9a2d1ac732f1d721aad1d8a31e6b9c48c743b9610a +1782371 00000000002f6a61f0c6d81f9bf8ce0ef9f8fd533080f5b9859a994822e8c9aa +1782404 000000000134fedc4f7bb328476930759a5f8fbfc9b90c5f0b87daa2916a268a +1782441 000000000059df1b64f8757b9c9fa7d559f1b71bbe0b47a828003d16fbb9f1f7 +1782472 00000000010ef538629e39ef1d53f109c50c4609303c61208fa2aef02d495c54 +1782502 00000000011877113598af5f19bae54abe4d91380f2903adfc694d96e0312aaf +1782533 000000000004a4090af80faef60a1f3f6a3afc6476a862e5fb1cb803eeff28b1 +1782564 0000000000987dfc99b3645d9a395a36ec824384463ea8db209bf0486b4a3e9d +1782590 00000000018960c7d982a87bbf2b669072ba89c39f6c036594365b23a447d5ba +1782621 000000000076a8b527d8d6b959f8894947a4ee09a0bd95f6ffb2cc2b6b44ce25 +1782664 00000000000bcb625f09e76a2e5e9c0a6c7e6f8835a4e937d7fced53c8d7754a +1782694 00000000012f2dad344de090ed60717eacf70a391680e914c9178d6e41c54360 +1782727 00000000007d1ab7a1b84879a84d6433c3745c71b7d68f166c32ab21a18db3c5 +1782766 0000000001c667a030d65dd8e6ad8ac434454610bca6992059345b05b442cf14 +1782793 000000000126a217f46940159dd385f6441e3f703a97ffb6262e1bad22cd53a7 +1782821 000000000145c7aed38a71ec9aa0d4b095d7729539df1c996f9d1083c8e4e012 +1782854 0000000000b6bfa752dd07c74e1020b0c7750b295777785330bcd2a15d5b65cf +1782883 0000000000c375d69c8155a799d4367f66e0902368a425b6866ab375d968c9ba +1782913 
00000000002b2d41a22411d420edcbb18c2697a3148b84ae2e7a14cbc68e5472 +1782945 00000000004e294cc9f22dc2dca3ad7dcac0c60a039558fbc8f0c84e26b3fda5 +1782971 00000000007807795a332bf2b0bd95f89394ff37941d6b51b97e65b14e798dbe +1783005 000000000182f9d7eb22f69500eb2b54b8ef6260ffa0edb0da4b10ef93b2b04d +1783050 0000000001634ab56a62de8ba8dda348ca21a3362a773d5ab33ac3e078acf747 +1783094 000000000142bcd3333713ca46dccc5a28668dcc695388fa2cb5a06869a998e8 +1783119 000000000076061d040787fe26cc1c39da3124f650df1ae80bc6ab40aa3dfdb5 +1783155 000000000119dbe5e9e7d6c09273e961567b97470e35465a5b83d6adc37b9736 +1783189 0000000000619b562063b62612bc9c295d51a802ef21c59bc52d1dbb10e88098 +1783221 00000000010e965fbbb2a25d551fa9c05b06750ee538403975c10ac741b26dd6 +1783247 0000000000ea23d20428ea46428bb563685056a1be219d1699f8c21c65a8fa4c +1783275 000000000008611a9c1a55d3139fd6e3be87499dd8c643bccb37b369c4d2a067 +1783307 00000000016c6078302ee3c480d658ce2712e80c0b34f15dcf9f80dbd8034f8f +1783333 00000000014b4708310e6c9381b8ba04993127cf6a41f9f5b0ed1e16549e89f0 +1783363 0000000000077adffa77ed24a1dd4d81b2fc14788074101f793de231241e37e3 +1783394 00000000007f7ff1ac506883a2ff8ebf983e4d63043e6016069514fc90e05c07 +1783423 0000000001105e3731e436ee0f62fcc26d13b1a80bd4047dd2e58b751eda27d2 +1783455 0000000001abdf97d05fd61a497897db32f9b615dc9b1681c5ba36e850e382e6 +1783492 000000000176268fb09087fd984ef50e0212598c015232db225f09cd03e063ec +1783524 00000000007de8d0ccf47d881ff179258deb35346c0653c61261d3bc95a6d95c +1783561 000000000133413711dc59f7e5af8251183f65adfd3b88b38ed63003b04ca00a +1783598 00000000013c8e6a7ec8466ceabbc8d07495f34d30eb7630479255a08e9d0b68 +1783626 0000000000406e5eebc42de32d8c2e724625453eb88cb446e4f8c9bc4c23b5d7 +1783660 0000000001d83efef4d7bc53c1cec3452ec071ad0733af2eb47fbf4c460784c8 +1783698 0000000001728a60de76d727d847bd0e675ab2d83f2523f148baddb44635d2b7 +1783729 00000000017f5462bc3ca7d250cbdcb21f77c2bd56cd75887b98112911edc583 +1783771 000000000109390133fd33788dbb5e61071ea52374211c388d82963e12590830 +1783802 0000000001df93cbeb85c651d5e36012cde6c58c96a3d51930c8ffbbfa0b18e7 +1783830 00000000009d70706a1b2a6d16623f481c623353a2b7cce6e7037966b0975814 +1783860 0000000000e07aeb116712bcbeed06707a09ef32d00390143778a7f7fc589155 +1783891 000000000044ee2771d92a9d445cc019b71386796475e7c0776a2d405d71643a +1783922 00000000017653c088457d3c9be928b36fac26fe827274a92b8d5e76d4d672af +1783959 0000000000ad89743fadfdc7a6f2a3de7600e32855ec41ba06d7ddebda9067cb +1783995 00000000010277f315e6125722b123f3404b7750494ced1d81f6800517eb700f +1784028 000000000139dd96ead846a0066dbcb2b41a51f3d85259b10bb4c01c90d19250 +1784061 00000000008b3987da50bfe971c496f40a6677f930c30ce3d08e050a2e5015e6 +1784091 000000000036097f73c23ff15392ff1f7f21cc55801b630af9921fadedb42e2e +1784128 00000000012f7040bd91397a2842415016c65acb7ba55e3a66ebb830106958a4 +1784167 0000000000b3cb60c041fa18f7421685a3cc04d94f85c4aea2e9841c96cbd4e4 +1784194 00000000005ea7d51bbacebb3bea6434eac05712d48b5936c3d3f929498fb194 +1784225 00000000012fdbe0d1c81f84efaf6159640a03f5d341e321219c7ae99ba1640d +1784260 00000000015d0c5004d2e8372a27685ac0068c9b5b95ba27d847948d8a6b87ce +1784293 0000000000bf6b1b474f94fdf8d243ec49cb95ea1f977743bb516e057fccf860 +1784334 00000000006f5f43e5c6b46143d7c75ac24e1458b9ecb3a4ef193d1ceb4f192b +1784364 000000000107cffe2dd0269002b1291fc585b19118758c4965894b6ba534954a +1784401 00000000012d644b0c2510d3e138767a90b1c35c47ad274f4be226b712f15c60 +1784430 0000000001c2df08cce7b73c5c73b4a1556d22d45ea42b2dc3d324eb23cf3bf7 +1784461 0000000000fe60c7ef5d3d3391d3274046c6756ba3768d38db2f900b4d1d6f40 +1784492 
000000000156997a66b336307dd876330b12224855b63d23d41c5df5b21dcecf +1784527 0000000001c3fe1cd9bc8630a535a206f6f7bf26fb784e3bbd0ff040680a3f5a +1784562 00000000013226f980ef327a7d1e905e175949b6b5b037679b555eb9e966b0d7 +1784592 00000000001c9645647184bb5bf0b8551a7256633085c50eeb54365c1a2a0d9f +1784626 00000000001415cd589268c9b7dfeab47e13c8368f97d7ae77ee554310f11b73 +1784660 000000000119b4e8b8de55cb97fd476211d8a165cff02bad3ec303571514f896 +1784693 0000000000e940bd707892c72b47155b35be13c93e0f73c31f538ff39403e8ec +1784724 000000000041bd6b76852550eee5647f0fc9a2b682fda64edee6aa517c54d484 +1784751 0000000000c08a179a34af058093e6d2e856c1c70406b290cf6b5670aaed89fb +1784782 00000000003f8592893990a338740a75544f77f8b35f3f6a5e154d53bd2ab2ed +1784815 0000000000ad649b999c932f60d7741e6e9403a4dccb1384e5165366f6e27cfb +1784855 00000000009c64bf6c781ac0270e916f43c8816afee34d0fa11242c537f6bb9d +1784884 00000000008d041ab4d49d9ca0e0edb0b08d2409aa85d17e126a3a7a16db5c0a +1784917 00000000016133ffa563dee0abf336f98bbb402966f90a878a3893de23032a14 +1784953 00000000010c339c6384baf7c5e39f00feda422772e5b6bf398182417537e897 +1784984 000000000133c29c79e16c80cf7bed5d043d6da0d554ad4ff8163e2952dc1a8c +1785019 0000000000f87ea0cfb585e7eede64a5ff08df773c14b400a29b0d1d199d4bc6 +1785051 00000000014e36a3b927a637f897bcb05638761f31ef5932f4200383debf0a18 +1785081 0000000000319ffce5a925297b681f7e9e65cd7ceb74115398f852541e749618 +1785108 000000000113555dbb91a0915b1366be8a99a83232a4fb0c6da911a331b46fb8 +1785138 0000000001b5eb15f2e9ddc203bee78539cc1e4ec7eade0dd65a228c334810dd +1785170 00000000003433ed0a25ffd0d4e9481004ff5c4203ec558d1f96ed937c2d0b39 +1785200 00000000017d82b6eb86f43b2142dcd4d3bae6a4b5c1430f585cb26b9095758b +1785233 000000000198b1aebe125faa32570fd7da54c1520948990baa300e0320009ee6 +1785264 0000000000c1dbde55aa0d030575ed893ff5c04cf8f4bdb18856a346691fccd7 +1785298 00000000014a2bd06f9d921b104a1ed79886f723d6915db7ce106d81f3238973 +1785334 00000000000343359804677ef8feddad90a0df6d5d72f12b8df7a9fa93fadfdd +1785360 0000000000e04d7253007fa8c2e510c4cab60b1b4e0bcc2d4f05fab8068b7b83 +1785387 000000000157fc5fe5fa1c2ce5b11d1f0b8e39c3b36d3773b499b8045afd0685 +1785417 0000000000cb0b8b47ebbbeef455ea07dfbe7bb979fc890e2e46d6f398089358 +1785445 0000000000f332587a9a970f6a16208d08923d461607cfcb653b478c15c74608 +1785470 00000000001dae5fd0cace56d549906640d1d7a03ead72878295a51c21a45d32 +1785498 000000000073580b030e67c841e2e4be0e3c9d177e4ac543ced44c518c4620fb +1785528 00000000018acd39a6fcc52cef632c9015f8109bfa149ae3ec5da5baf5a030f9 +1785558 0000000001cd1ddb34f0e48224911adfaf5e38cc9f04b334919a119a0d8168dc +1785591 000000000117e9566555680d63c112c3032047445a5d741019278306edc6ccc2 +1785621 00000000018cf00862b0be27efa8c9fade71cf6ddb5870cf5515e79c830b9fd3 +1785649 0000000000287e64dabdf1bd630e924d6a6f87bde411efeb1f9daac2e52fb065 +1785679 000000000126eee5c71167a1019d5905d4c5c77ce569b2f1149533a80c06d14f +1785707 00000000010aa105ffdfa97ee0ab5778b0a26cffc24f473d29d0c660f9f7c774 +1785733 00000000000bcf020b149a1c0c1b6bb6a8b21b84718606b27e00530bb801aed9 +1785758 000000000041fa870fb0ead2136224d1bdb2f9145e974a29c86c86b1f6028e59 +1785792 0000000000d6c2872d7dfc05583d7567fba3f9f073b16c68b514c3708bcde5c6 +1785827 0000000000c8fc22c54a7f15222982107128330fd7e25b385485f358e6e0b343 +1785856 00000000011d9f01c29be5cba8b1edabcf9a3ad6fd627bc7b0840b9329b0a57c +1785882 0000000000208924405bfae8d8ce04de8caba6d903233f70e5a067d38726b189 +1785918 00000000015e4122d9801d6b57ab38ac1c2040c8afca26c4421942094158a8e7 +1785951 000000000005cee1f0bec0d89e63209a0e4c3ab6f3ebb70beb4f8c91f370665f +1785977 
00000000010fac6810daac82ace6ba88faa0ac6b5283ac8a7ddf090e33c8355e +1786005 0000000000b4a68060b57f44afff6f3dfac424e36622fbe80b03cc7131b5bae4 +1786032 0000000000ec7803409d00a14844cc0fedee9a3299fdcb48179b8699501b412d +1786064 0000000000fdfd45248b623b20f1e9970b2468dce72ab751a9fda814b686b678 +1786091 00000000012ae660c28ba3766553fd932d67727231b40faae00f6148cfe93e73 +1786127 00000000014088861c702cc955c5e3ab4665aa7075c977dfe30483117ff9c90c +1786155 00000000018da7a682f428a89934df2f9d1b365ae2c08ee467327503f2ef838d +1786190 0000000001257b5b2b855c5b9297819669dbe840f40e1f564379426a69dd0169 +1786221 0000000000465bc4afc3f554d33887d72f05323c70d51f698cd13aec850ce27a +1786247 000000000044849fb1c91a56e0e209ad3ba07d66e2a3524105dd18740a1b9c3c +1786273 000000000047635431f63737a952bdd339c694688d7b228048de4c82ee10d0d8 +1786305 0000000001152884422545d6e0df97042c97cf2c4733384d9b2cb8ec0511dca8 +1786335 000000000097da964b7a343886bc15b825e7bc01da51eec0c477fd09a3df2872 +1786364 0000000000fed59121a5bb097ffccbbf13a9fbd31d5b66915740202812be8762 +1786393 00000000013acdac74740743d874209c706a18d28f078164c9ec144b27871a9d +1786421 00000000015d1d378e34d9f9b4056af88fb121fc779925573865024956c599ad +1786456 00000000012578954c869d13985d14845eb871615f90754d7a837e169b21f056 +1786484 00000000018c2cd28372f345fa39e64c3dea1ad555856edd1503816e3080375e +1786509 00000000000eb597fa6ad347ea6671b47e7d89a6ba7deacac60d031d2c66adfc +1786543 000000000013659e1f55e2de467d591cc4fddc2b9a25792bc11839091db2bd19 +1786573 0000000000d95ae50b37dc6d5ebb30c0fbd53857b79c33af5ea396a17890a384 +1786603 000000000177dff5431aa034dde86f2aeb763a0b217b6d1bb6e21ec49187a057 +1786628 0000000001632e4568949fac99aa33f3c74f372e2d86547c123793c7d0f97ddc +1786651 00000000008db847ace0025e4b444700feac48eeee3f21e39aa7c51edcb44f0b +1786686 0000000000f6a284d2eea60d1f168cf0ae214578522c98033718eeded13c20c3 +1786712 00000000001d2144740b3fd5763c005dea4ffd246f904392c190a025f23c552a +1786739 0000000000d921c99748b0e40aac02b8c53f2567ecbcf765e1d43032f2cdc9a8 +1786762 000000000055aa61696a635b3e4be82cec6df631761c0c6227d0255d22c6ab73 +1786786 0000000001574cca72de1ac88b2a881a3b25e0a3a7808a90be9d6732dd994cf3 +1786821 00000000000ff51fdcd4f61bab8c176e1926aacd5a51ff1bfa69d045b259b362 +1786849 000000000001587e8a82ae563b0f239011229f4a502c5b71abf0aa43174c4214 +1786881 00000000000eb8800ffca801ed282fdf7688a6c38540957877e73e90f0301cc3 +1786904 00000000003b6d9d2fe6d7c223bcb995269feb9c30b64a43633af00054b2e8a4 +1786931 00000000012af3f26980804b3fe478ea0f4d48d8b6a03e5800a1d6143163b0d1 +1786954 00000000003c8fbaad0e0dd24fb1e005862bb489127876e501d1d5dac094be9e +1786978 0000000000ef8a7374cd3614bc896fd60b6a60d798c54816a50f763a3b02cded +1787011 00000000010c72ae6dee94dcc773316be39b1f7c1c8a289d54ed3d99cb938e50 +1787044 00000000009e4e28445f57c8ca8498dc7ab90e697c6ce50fa7958b38f296c366 +1787068 000000000004e85b1515eea500e11ab704e1be97c4fffa8032e6a089fd75478d +1787094 00000000012aa6c730df5862e1bff664a0ce7ef7f2f8b880be9a5e1e2044872d +1787118 000000000057ebe9d4124717f0987362de879fc7e7407c0c8b924d6da53cc68a +1787147 000000000177d5b64ee31938caccee87ab78cf80208348bd0bc2e620f55318b1 +1787177 0000000000f3a8edf7f6d38c594de696f70dd7177c9b753e39d42806711c3e3e +1787203 0000000001b415415e50b4ffe8d8d19c01f3e5c7a384441b560540c91429bf61 +1787230 000000000169ecc3c1ab432969a3bdfeeda1f8d85d7ef892eb8216fe8c5a2634 +1787257 0000000000c5896f0c379f919660dfa2755492e0b1ed4977742e62fa3e5e1254 +1787281 0000000000d5ba0844c578e028843076b4b87bee2a1f7914e6966f991b546abd +1787312 0000000000f5648c0b9c60175f32a91ed383edd2d450aa445651013e3f6d4912 +1787338 
0000000001a9d215978eb0b651f84a422e5e606b47b523b4d5816c6be500b8fb +1787363 000000000062dea0baf3064bf25b80853209c63fc2c2c8cb0972d8b5979f80b2 +1787399 0000000001049b21f1d4d8a7156a17e0b45338ec7c3469a2270c153e770afbe8 +1787504 0000000000a10c77baff6d9b16cc17a876de633db0039b956d8a422a3973b9f1 +1787541 00000000000c2402ef0077d6375839681d993804dd141c4c6f27f234a2a9c50d +1787567 000000000087aee3723450cdcfb14852f4ab4765295bfda3a7778ec9af973417 +1787592 0000000000d3135b4e9713f874b6a40710a3a01dfc953e5dd410cb1a7401dd4b +1787621 0000000001b0077491b54493fccf4e6e7456f7191eae71015161361601b29cb1 +1787652 0000000000911d338231bfe0f5f91893570f3f3d4bcc68bc00086d60b8681654 +1787682 0000000000c8c295d0894eb30e2d5493f74b79be9c7a6422d511a11a6b924916 +1787708 00000000007ddf220738f3360365952898bd6d20d9f19822cbec786797432331 +1787738 0000000000d14c5411d693368c4586836285392a7e2d5b35d6df4957cab3035a +1787765 00000000000d556e6b26c647523669d63791d018d4d82a9ed0b94c8567185ad9 +1787794 00000000008883a151c35149b61d27fa8a813c51512b090c11d93a5d2049c108 +1787815 0000000001778938b831af5d372787872f1ce4e058cc6f47fba68721972a65bf +1787841 0000000000d08cd1a96b2db6b2801a5066afb838434e785e4a1fc33300c123d4 +1787870 0000000001584c46ddb1d7cacdb4c3e789089379c198d88d6995d7236e728c1b +1787902 0000000001ba27fa9e1d09a69f9427e3e3b01bd23c6493842cc7fd173d752321 +1787925 0000000001b0feda87cb7b5c902737b7c82af9e63956c1e822604ce52998e5c6 +1787948 0000000000a7ec880e05fcc8820d9d2859cc9696226eda00618ebc789ad0749d +1787981 0000000000cb99557cd45e6518892ec1d08ad77a3c8a4570dd2854f88cc4e69f +1788019 00000000015be7ab938b067f6c73265de2f3da66c1f9482af5d8603107aa9984 +1788043 000000000015265368fb246d0c89a575b03581f377c6ef36dc6c4c2f0bd0ba31 +1788075 0000000001885dfd4a4243d48ce6498707280af96b50695b3ae0a6841e4278e1 +1788103 00000000004f2dc9008c8cbaa659de3c61bc8e2338ffcd6137c279c5f5b28172 +1788128 00000000002ae6630114080647fa6c8e423c422ca01991d4112cd434c651b40e +1788156 0000000000dbb7f25e0b9213d3c2504fab58604c075109a2f545abf7f28a0323 +1788184 00000000005ebf0a21512723fcdc3cf97d026b89a373ab8fadce8cdc53e388e5 +1788217 00000000006a386962ec8086ba7e28dc4c06eff80f20f318d4e7d7aeb0e5dddd +1788244 0000000000cade31d53a2dcb627bd5533aa75c8f6a443c306fcf0809787abc9f +1788271 0000000000449ab02bcb030161b9779ebd25e10580cadb5926b877cdc2c7902a +1788298 0000000000f5b61e9e0801e37b6c9e9d48c2cf3803a7fd3dc32945101381fe3c +1788324 000000000158f7ece4f79aae5e05bc24771bfca107cacb0f31d663ff84faf599 +1788356 00000000013b135da4e05bb1492d00b0fedc8b726866dc907c8d8326a4996185 +1788383 00000000015befae4fb6542dbd14fb677d8edb89033653d28731adfe2812659a +1788412 0000000000fd486094fff48ca2c06a13f26c8f7fc956dbff216cd7cf7bbd181f +1788438 0000000001268058bcf4c98891b6d0e590b465a29e51a271f1fde146eccd8df7 +1788462 0000000000cffc51c8a23d5107d181ff8fdd11a82785628a202896d8ca8f31a9 +1788496 0000000000589df609df12dea9e3057200a806f9a148b978f5fa7ce7ce54e612 +1788528 0000000000a270a8c742b256a2b43544db1020e4958e529875c672a9692b00be +1788561 0000000000a3e79f65b756c2fdfb479c154dcbdbaa3032a038d3f5ce695883b2 +1788587 000000000029edcbed330985923afeeae7b72c917be44d5d59ee758878101f48 +1788612 0000000000213d810dacf60efe3517ef5dbce9854bcefabc5601edbeeb11e29f +1788641 00000000013156d167b9a3109445677737de3e5aa260832baa6d73ba31512aca +1788667 0000000001060171fa8376993834de83a9989c555c554c13908a23fac700831e +1788689 00000000003eee112dd14c6a3850f41928c6b378b17d45c0b20650ca1de0fcf0 +1788719 0000000000198d0bb62a5f438961b438f51745671b7e47fa3e32dee86554ca21 +1788747 00000000000f9c1f757a66eb787aaf447d238153444de121a1f3f33514229552 +1788776 
0000000000bbff7dee656ab80b8d8912ed26cfa7f42c4ab190222a44a454660d +1788808 000000000014a9e685d844de5ecc75bd7a8ea192e238b2a9b0ee29e72f7ce3f8 +1788835 00000000002720f004291ebae4b15d8f1cf6dff23e23bd71f6cf8ae635ab9b02 +1788864 0000000000c8945f85f58c570892d4a1eb5b646108ceb246b94d2930ed4a879e +1788891 000000000036566096a998f954e01dff3f8ace9774c1e72dd16bfa750457f34a +1788919 0000000001690ee967b5b47a1917d50d98e5e5bc2ae92616373844f7573b828f +1788951 0000000001577c2dedbccf0cf53c40aab6aaf6ddf3084e066332e798c04ec425 +1788979 0000000001140d9a24424c57f43a5547a8f0db169b98b458c3d6d54b4f51438d +1789009 0000000000ad22d0d6a4c75406ba49966fa6d8fdc79df7bf7a15b4f89e713912 +1789038 00000000005bb35eeae98984a8abe078f980d19ab9135f7e31fe420d3f1e64d8 +1789067 0000000000d491d21945d65b574d4fcef8d585374af733ff2338f5f03e099b61 +1789092 0000000001577b00f3ae124e460ae527983a6d6aefb2b024c02fc0f8220b55b4 +1789122 0000000000576d3f0b697f7096c0e5b040d2051ae9e1789de1ea23bb8ab130f3 +1789147 000000000088dbeb92cd2fa2a83d03eb8d4b7995e13e84b660760a65efabcd41 +1789180 00000000008aab05ce0b5b879a8d640652711ea009294520bb6712772814cd51 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index 80f9119278f..d92780318b3 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -4932,3 +4932,111 @@ 1972400 00151cf3b9cac32b42df0b40e32d8d6e66fe7acb48b28e6785df6be0db2edb4b 1972800 001de849b64c29ce175067ffae27f5555d7db1a0ae92410a115967af330d4999 1973200 001e1a523c85be6d46d51dbca495cf866fc9a989ca9f1105186c55dd148c9528 +1973600 00265763c10579b315daa55c9ab74cfb48b41c2b74c5e24485d402edb68e5dec +1974000 00380a2619a13f318c816e8229fc9602791df59feadc8081e5bb0bf3fe906317 +1974400 005cc388579329523f82c20970dd1e7b39108c32ee9bf306e5730dcf3e8779d3 +1974800 003df977d139a2b3b79d62401df056716b5e755ec25edec0fd2a74ab470ffedf +1975200 0008619d33a1533c1e180b9cfc3527ef1ee1fe456a5532f15f5789e4d5d6bda7 +1975600 003b97a8b01c012ff04fb78720ad9885c684a8ea93d307c86218584584e7d60c +1976000 001882922994287cb84abd84121b08986e3159fc4de500c922ac81f2508e1256 +1976400 00036dac1a6d73476853f8a2bc5a702851cea7ecf2fb8fb4227caabec94103a6 +1976800 00353ae88fd2b4ff5cd0d2bb79ebf7eff66b27f42a264058bef7d93fd06b0a3a +1977200 001e12bda961b4e8fb6c51a4f288ce3f99dfb88c7eef3fc6c2ec1742692c4629 +1977600 001f2ab6744a11312bb50b3003e703e579fbb1a1fe5a481be17ea82e6671fba2 +1978000 0001ef2cf696b888d0cd805992a5566b1394649d913468639b66b457eaa76423 +1978400 001723719dd4c65eddec54105892729309aafa06c418737c90dcf3a34180509e +1978800 0025a185a705fe715932bf3379a8d2bcacc038229943b45a7a61a496ce469360 +1979200 000730a5b616d5a4b9f842b2e5c61f5f80eb5604917bf31b8085c7635201953f +1979600 007b1d2a72a79ffe2fd778897378ae3f597d5770949e52f291dcc566e056eb89 +1980000 002ed4cb2b1ff42fb3c0a2cc937fa7c6593c73c0c8688f3ce6c398419a4c0af3 +1980400 002fa0d4fbb90811caa0312502bf1683d50bf08143441ff2c429e4a62199101d +1980800 0030a7699a80f9f92a51707d1b03e701dc57df67db97221ee93996c2d53e9d4e +1981200 004ae1a3072fa412c25fad190c9228345a47df0c48468e773857adf1fe8bf684 +1981600 002b6365200379fa409d4b329e53e5cdb7fc2b1063aef07c1d7c204f42357da3 +1982000 0043384ee750cc5d113d8ce750658659a08be423683cb43ab359270a61104530 +1982400 003eb30a6c22f1c35ef1a4a1090a9c60f8d67f673a2f97c20a13945e7e93b778 +1982800 0020499324f2314b0c204662f643a134875c112f6b46a833296231be915e2b96 +1983200 0014df08314fa23c2f88fd3ccdb11a3b8e4d335ee0312a932e44b8f69dc6e4ef +1983600 0000d25806dcfd4f3d1be051ae453b7954559d73d027d8a17097f8aa472d74e4 +1984000 
00a89fd5734399d90f7820a12f27a09778cae1446e38c8acc67fcf1e41ffa996 +1984400 0024645d2368ea310dcb514d574c5802ba1a60a2823229a9f4f92f72f875227a +1984800 004231d2c94d71ab7276c4a0febb851f1baa3e0639639fb5e8d98ef21d785a75 +1985200 000ce9f81bd12fdf53ec461985271b839e748b4130c429d6b77f639d4c182541 +1985600 0055daf6c7b9e4e727b0b3056710f14b284b94f8a50b2e577df2b8f4b759762d +1986000 001a0ed99082ecef987997765ac7aabec31d051758bdfd3fd5fa33179c6950ac +1986400 0069056e6f492b0c7c5d26ddc641250a46125431c268955c3eb841255496c9a0 +1986800 005d42dac42182abaeee8b3f8460668c8d3599e3e4d9bc1b69fa6e03a2aed1ec +1987200 000703886a860a480f6b646b600bc7e48cef146550e74759bd531aff137b2e8c +1987600 0034245610fd597c608e3850c88f7816facf1a8a533aa61b29a27d8e81366217 +1988000 0016e24a98897e6d52ef100d475b23aa576454c3c018e04226214a8eeeb2302a +1988400 0044005e9f34f9cb3ff5cb132a103d7102e0da3e7b87800727a1003f96c7b68d +1988800 006806c79f956ec1166e31ec4015fc7bae36928b874a1f56d0b95d296b775f19 +1989200 003571b30b3edb1f37dd7e0dc8f4b490eda9212eb4c0888f114ceca0f08c2929 +1989600 000b5ac6b6faca304cd87b9f532159164e498e23c1a468be17df57e1ce426e70 +1990000 004167f8881a1e0d6407ddebaa77dd2ddb9d5d979f5a489cbb31f6797ac83e35 +1990400 00403f521a08ef0f3717e41621a4a000ebf956a1134ab35b465428ccb01a9afe +1990800 0027eb22a910ca978fd9cd77a0e86b700dbd87181ede945f77f8fc1d39209d22 +1991200 000ea6a0a384ec066f41514c28da4e4ccd402b2390a72a20d8276469d5014fe3 +1991600 000e8d098fc2fba518aa94870110ce188b986eb35ae89c53ba4dc9f3b0c944f6 +1992000 003ecef7c8b80aefb14a16a0a82b9c8da19ec6eebc96ea991515b5cb6cf7e5d5 +1992400 00d25e99c500c46d96c2fc78dd23d308edb47fce34db190aae4bf8995c076f52 +1992800 002d48f4791b6dfb65da707b435d22264f41583fa7dbaa6ed603ad683161180a +1993200 002ed8f8aa9d76f4c430dcd2f6d55a822d107a72cf08344ab0868963b6a50b6e +1993600 001ea4bdde5852ad6c59c92826137dfb9e427238d42e067825d5263376981cca +1994000 0016d04154743e9480d600e3ae920ec95ffed3a735e2d324ab5692b2bda2b27b +1994400 0034fe9f2debf4d06b5bc5fff774efbfa2b7dee10c46fc0953c899e5032f4b6b +1994800 000156f5d02b3b0e6ce4ae2d9f42c75ea31efe92781f3e4867c7925ae29484f7 +1995200 00275f0655e79b403937a7c6fb6b07fe68c3891bd822de829b6915e9f8cf3e92 +1995600 002d663e2a10203911b63b66e819056cecd74db736b9ff2728d380fa34d4b7ed +1996000 003b1fd3c35ac4bd1481d9bff3fa69443aa399382463d62afa85707236df03db +1996400 004ef39b14412fb2390675beb172f5ccd05c4416c3663ede3b23c975cc419b76 +1996800 002772a25d457cfae8ae0a181a4e319130082dcfa7552609e3ced155bf287fb3 +1997200 0017a0941690b5d40d9fb771817c1aad0a5b03ff21630fe11e77b2f3ad0b446c +1997600 00eb865f4674e11f723fe404331876e2f2e38370815d755880f5bc7eb2260257 +1998000 00213a4658f55a4107393e2bb01079d92491724fa661eed17185ce522cb338e9 +1998400 0003e78ca9f05d662fabc66fc76094560212f804659f2c29ac044f65df6e13ff +1998800 00324bb06cc2ee5dc16a2239024838eebc673d21403bb15176b59e6ca44df95e +1999200 004a95c1c4d6c53834e40e6991db3d2fa437dbbd32c7e7e10a1565128394ba25 +1999600 00b7c62356e851758b422cea43d160767ef90aeb9ab59ed889e0e4d47f2fc7ce +2000000 000bb3c6d575a155a56b1aab0e7f14a1141d25c1f36aa6f15c1cb1ef1d7d498c +2000400 000fc5437e147e11d798b3dfeb205c889f867fef123f7e76d0b860838dc8d3d2 +2000800 0074188385c91c3c5f1eaeade0cb2db60223f022a81b44c84f52e31e592f7852 +2001200 0033d743f48882ed571f94bd284663a32d8016f328b84bd1f518fecaacbf7daa +2001600 0021a2f1d79b08e879b6565ab4196d85fce9f22e2d974dc4ad7161d61fb48187 +2002000 0091cd30508417e28b7d7c993e92b3dfedc115800cbb76ba5035c922205f06fc +2002400 0043d56f9b5c21d2a394fba8c2c195726ff6b7e3da5bd294dbdf0dc6ac55f1ee +2002800 0017c1b449adae17c5ac93aef0f7b00897d7af87402c4955a35d1b5306b62a07 +2003200 
00378c94df1236c43e542f6a672e0af6d39a85d9536a566334a683a13b0ce2c0 +2003600 00782f31f42ac7289795d825c9fd83d9375990d21ec2c8eea5cca8b5c290bca6 +2004000 00728b3cf3d0d06b2245ef5f592db7dfa80fd95d0bf51c870ff8f8e163325136 +2004400 005357d4d3b926a7f39c9b451c31817c086c27fc81c7a0757a3127d65880eae0 +2004800 003702719a3808ff5f569733b13f981fab5f649757371db837b96f8cb4812287 +2005200 01410801403c74a981c27c98050b6617d8dc0d4e347b1bd8e00b3e23e2ebb3e2 +2005600 000525e53397812e0c7c76e98ffdcf8fa86165d24d95a7199f71a608dbc899e1 +2006000 003ac88438d68ca81dfecd1128026672a4abb15f7b60adb4f3c3bfb378c7fa84 +2006400 054ab51429a7ce4a4251749f7e48dc919c53b3bf22b1214c374a44ec339a2188 +2006800 001284eab9bd359607256db96da6cc443507851c1d859002241e9f20a924f978 +2007200 001257668e26b3b8fcf71b50c3040c82f7dd32642285c60bcfcc795bf41a4b69 +2007600 001f72fbb099f9f91edce1ec6005d8d732c4f17f8312d1ed27edf5d54f3e5b0f +2008000 0032c22828be397e5cecb8316cf704121a7c87b52458df1ec882f588bee870a5 +2008400 002ff2ace367c90aa9a815ab64826251eff4dd94c598fa008f0164ccd72c9c0d +2008800 0022a1b1556bc6299c9fdba4d034ed6261ae96197370dfe45a8ae29147ff817c +2009200 00314f78c6eb3a9408381015b839a97833d16491472c40b4a546d4032f336ce7 +2009600 005215ec6cf903ce65345884385a46b3e56135c1ea751473e9577047a7ae40d8 +2010000 0013159a578c874aeecddad8707b8e274a018078fd80fc3b9e2d04065abeb05d +2010400 0045334396a5c38a6247486527e428ee484f3eabe8b7bf5c609123acefbc4788 +2010800 000a5490e947a289e6cd853f4ee9c25abec74a4cc5ae36318b2bf5702c473842 +2011200 000ba27d9ec1ffdd812b0891ab19d24b4b3cc98051f8ee49792d224cda7c9b72 +2011600 00415fac1365dea3631a0cf0982c7904685eb038d124b8e34855efcd6b176d5a +2012000 001d838c34eab41c3d8be383b51019ce005eb727383e6c44cf7f4df85af2bff0 +2012400 0061eb8012e20ef1b49222d31ae577bbaefc52fff13e298f58f5fe992187b18f +2012800 004b9a0d44a9d495150b862bafaded9d78ac735081eda67193fb5f26fc66db8f +2013200 00234553897514b604cc1e5caedf8b8cb9472dde685cdeb77ed2632a8609c7e1 +2013600 000f26bb5542b352e2888aecca32fc185b9bec891aa92b7d6f2643f731ab03af +2014000 001cdfb32b44cde7999ed70e1f526a6eb6f924d0a17040795b73ea4b883afbe8 +2014400 00008a3c7c6cd0a6096a5a471e933c0e1a669a70ef5409c077dc47880fcba97c +2014800 003494880adca69cdc279f0d47dae210ca890e8a60b87cd113c4b47c2c56c8af +2015200 002ef04af2d74e1ab4acf04d9e44cee49b11f252b04837c4c12c3f5473f0454a +2015600 002587d8f4d9ee8d1c3b2b443f4adbdc2b4742a3891f6a7e564902f410cceb2a +2016000 0007ba2ee7d42fc13744df43f4b962b7a94e97d4730c11e612242d4af2fff114 +2016400 000d327c1d00347bc21ffbad019a2654bb7ca37ee95794e315afa1f2a7e5bbea From 96861344862689f84a76ec5d8b657475e4b2b2d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Aug 2022 05:29:23 +0000 Subject: [PATCH 25/42] build(deps): bump futures-core from 0.3.21 to 0.3.23 (#4915) Bumps [futures-core](https://github.com/rust-lang/futures-rs) from 0.3.21 to 0.3.23. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.21...0.3.23) --- updated-dependencies: - dependency-name: futures-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 Cargo.lock                | 4 ++--
 tower-batch/Cargo.toml    | 2 +-
 tower-fallback/Cargo.toml | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a38b2f61141..42aa7ac2bba 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1677,9 +1677,9 @@ dependencies = [
 
 [[package]]
 name = "futures-core"
-version = "0.3.21"
+version = "0.3.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
+checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115"
 
 [[package]]
 name = "futures-executor"
diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml
index 45d3adc7aea..397b3b0fc96 100644
--- a/tower-batch/Cargo.toml
+++ b/tower-batch/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-futures-core = "0.3.21"
+futures-core = "0.3.23"
 pin-project = "1.0.10"
 rayon = "1.5.3"
 tokio = { version = "1.20.1", features = ["time", "sync", "tracing", "macros"] }
diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml
index 1a917162aeb..d3c9e491901 100644
--- a/tower-fallback/Cargo.toml
+++ b/tower-fallback/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 [dependencies]
 pin-project = "0.4.29"
 tower = "0.4.13"
-futures-core = "0.3.21"
+futures-core = "0.3.23"
 tracing = "0.1.31"
 
 [dev-dependencies]

From d692c604b7eae29a014cd468390e52df9fef5604 Mon Sep 17 00:00:00 2001
From: teor
Date: Mon, 29 Aug 2022 15:29:38 +1000
Subject: [PATCH 26/42] Update sync workflow docs for edge cases (#4973)

---
 .github/workflows/deploy-gcp-tests.yml | 35 +++++++++++++++++-------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml
index 2a062cbf709..3aaed05cef3 100644
--- a/.github/workflows/deploy-gcp-tests.yml
+++ b/.github/workflows/deploy-gcp-tests.yml
@@ -268,9 +268,9 @@ jobs:
       # - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not
       #
       # If there are multiple disks:
-      # - prefer images generated from this branch and commit, then
+      # - prefer images generated from the same commit, then
       # - if prefer_main_cached_state is true, prefer images from the `main` branch, then
-      # - use images from any other branch.
+      # - use any images from any other branch or commit.
       # Within each of these categories:
       # - prefer newer images to older images
       #
@@ -291,8 +291,13 @@
       # Try to find an image generated from a previous step or run of this commit.
       # Fields are listed in the "Create image from state disk" step.
       #
-      # We can't match the full branch name here,
-      # because it might have been shortened for the image.
+      # We don't want to match the full branch name here, because:
+      # - we want to ignore the different GITHUB_REFs across manually triggered jobs,
+      #   pushed branches, and PRs,
+      # - previous commits might have been buggy,
+      #   or they might have worked while hiding bugs in this commit
+      #   (we can't avoid this issue entirely, but we don't want to make it more likely), and
+      # - the branch name might have been shortened for the image.
       #
       # The probability of two matching short commit hashes within the same month is very low.
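+      #
+      # As a purely hypothetical example: with a `zebrad-cache` disk prefix, short
+      # commit hash `abc1234`, state version 25, and a `tip` disk suffix, this
+      # regex would match an image named like:
+      #   zebrad-cache-my-branch-abc1234-v25-mainnet-tip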
      COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}"
@@ -1096,16 +1101,28 @@ jobs:
           echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> $GITHUB_ENV
           echo "TIME_SUFFIX=$TIME_SUFFIX" >> $GITHUB_ENV
 
-      # Create an image from disk that will be used for following/other tests.
+      # Create an image from the state disk, which will be used for any tests that start
+      # after it is created. These tests can be in the same workflow, or in a different PR.
       #
-      # This image can contain:
-      # - Zebra cached state
-      # - Zebra + lightwalletd cached state
+      # Using the newest image makes future jobs faster, because it is closer to the chain tip.
+      #
+      # The image can contain:
+      # - Zebra cached state, or
+      # - Zebra + lightwalletd cached state.
       # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}.
       #
-      # The image name must be unique, and be 63 characters or less.
+      # Google Cloud doesn't have an atomic image replacement operation.
+      # We don't want to delete and re-create the image, because that causes a ~5 minute
+      # window where there might be no recent image. So we add an extra image with a unique name,
+      # which gets selected because it has a later creation time.
+      # This also simplifies the process of deleting old images,
+      # because we don't have to worry about accidentally deleting all the images.
+      #
       # The timestamp makes images from the same commit unique,
       # as long as they don't finish in the same second.
+      # (This is unlikely, because each image created by a workflow has a different name.)
+      #
+      # The image name must also be 63 characters or less.
       #
       # Force the image creation (--force) as the disk is still attached even though is not being
       # used by the container.

From d78026466e160643465e8e384582bd0d94acd3a1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 29 Aug 2022 06:57:51 +0000
Subject: [PATCH 27/42] build(deps): bump reviewdog/action-actionlint from
 1.27.0 to 1.28.0 (#4923)

Bumps [reviewdog/action-actionlint](https://github.com/reviewdog/action-actionlint) from 1.27.0 to 1.28.0.
- [Release notes](https://github.com/reviewdog/action-actionlint/releases)
- [Commits](https://github.com/reviewdog/action-actionlint/compare/v1.27.0...v1.28.0)

---
updated-dependencies:
- dependency-name: reviewdog/action-actionlint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 668c3229a22..edd516ca4a6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -145,7 +145,7 @@ jobs: if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - uses: actions/checkout@v3.0.2 - - uses: reviewdog/action-actionlint@v1.27.0 + - uses: reviewdog/action-actionlint@v1.28.0 with: level: warning fail_on_error: false From c76a954033481b69ff2300826f21a0d721a7f4b0 Mon Sep 17 00:00:00 2001 From: Alfredo Garcia Date: Mon, 29 Aug 2022 03:58:30 -0300 Subject: [PATCH 28/42] send starting height to peers (#4904) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-network/src/peer/handshake.rs | 5 +---- zebra-network/src/peer/minimum_peer_version.rs | 12 +++++++++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index ae7df8aeb76..2b57fc21824 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -27,7 +27,6 @@ use tracing::{span, Level, Span}; use tracing_futures::Instrument; use zebra_chain::{ - block, chain_tip::{ChainTip, NoChainTip}, parameters::Network, serialization::SerializationError, @@ -580,9 +579,7 @@ where address_from: AddrInVersion::new(our_listen_addr, our_services), nonce: local_nonce, user_agent: user_agent.clone(), - // The protocol works fine if we don't reveal our current block height, - // and not sending it means we don't need to be connected to the chain state. - start_height: block::Height(0), + start_height: minimum_peer_version.chain_tip_height(), relay, }; diff --git a/zebra-network/src/peer/minimum_peer_version.rs b/zebra-network/src/peer/minimum_peer_version.rs index 421915d8f9b..7eb6d6d787c 100644 --- a/zebra-network/src/peer/minimum_peer_version.rs +++ b/zebra-network/src/peer/minimum_peer_version.rs @@ -1,6 +1,6 @@ //! Watches for chain tip height updates to determine the minimum supported peer protocol version. -use zebra_chain::{chain_tip::ChainTip, parameters::Network}; +use zebra_chain::{block::Height, chain_tip::ChainTip, parameters::Network}; use crate::protocol::external::types::Version; @@ -66,6 +66,16 @@ where self.has_changed = true; } } + + /// Return the current chain tip height. + /// + /// If it is not available return height zero. + pub fn chain_tip_height(&self) -> Height { + match self.chain_tip.best_tip_height() { + Some(height) => height, + None => Height(0), + } + } } /// A custom [`Clone`] implementation to ensure that the first call to From e243a357f989769930b523a7e9ac4d70741c150f Mon Sep 17 00:00:00 2001 From: teor Date: Mon, 29 Aug 2022 16:58:56 +1000 Subject: [PATCH 29/42] Fix clippy::unused_parens (#4931) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-state/src/service/check/tests/anchors.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zebra-state/src/service/check/tests/anchors.rs b/zebra-state/src/service/check/tests/anchors.rs index d9bf90122df..34b8815936a 100644 --- a/zebra-state/src/service/check/tests/anchors.rs +++ b/zebra-state/src/service/check/tests/anchors.rs @@ -1,6 +1,6 @@ //! Tests for whether cited anchors are checked properly. 
-use std::{convert::TryInto, ops::Deref, sync::Arc};
+use std::{ops::Deref, sync::Arc};
 
 use zebra_chain::{
     amount::Amount,
@@ -207,7 +207,7 @@ fn check_sapling_anchors() {
             Transaction::V4 {
                 sapling_shielded_data,
                 ..
-            } => (sapling_shielded_data.clone()),
+            } => sapling_shielded_data.clone(),
             _ => unreachable!("These are known v4 transactions"),
         };
 

From a87b119a10b631beecc2d5ae627b5f5ad6f805bb Mon Sep 17 00:00:00 2001
From: Marek
Date: Mon, 29 Aug 2022 08:59:25 +0200
Subject: [PATCH 30/42] feat(state): Store history trees by height in the
 non-finalized state (#4928)

* Add history trees for each height in non-fin state

* Refactor formatting

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 zebra-chain/src/history_tree.rs               |  8 ++++++++
 .../src/service/non_finalized_state/chain.rs  | 20 +++++++++++++++----
 .../service/non_finalized_state/tests/prop.rs |  3 ++-
 3 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/zebra-chain/src/history_tree.rs b/zebra-chain/src/history_tree.rs
index fcd19fb478a..223d1b372ee 100644
--- a/zebra-chain/src/history_tree.rs
+++ b/zebra-chain/src/history_tree.rs
@@ -507,3 +507,11 @@ impl Deref for HistoryTree {
         &self.0
     }
 }
+
+impl PartialEq for HistoryTree {
+    fn eq(&self, other: &Self) -> bool {
+        self.as_ref().map(|tree| tree.hash()) == other.as_ref().map(|other_tree| other_tree.hash())
+    }
+}
+
+impl Eq for HistoryTree {}
diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs
index aabfac839a5..ab833c568d9 100644
--- a/zebra-state/src/service/non_finalized_state/chain.rs
+++ b/zebra-state/src/service/non_finalized_state/chain.rs
@@ -86,6 +86,7 @@ pub struct Chain {
     /// The ZIP-221 history tree of the tip of this [`Chain`],
     /// including all finalized blocks, and the non-finalized `blocks` in this chain.
     pub(crate) history_tree: Arc<HistoryTree>,
+    pub(crate) history_trees_by_height: BTreeMap<block::Height, Arc<HistoryTree>>,
 
     /// The Sprout anchors created by `blocks`.
pub(crate) sprout_anchors: MultiSet, @@ -161,6 +162,7 @@ impl Chain { partial_transparent_transfers: Default::default(), partial_cumulative_work: Default::default(), history_tree, + history_trees_by_height: Default::default(), chain_value_pools: finalized_tip_chain_value_pools, } } @@ -190,12 +192,13 @@ impl Chain { self.sprout_note_commitment_tree.root() == other.sprout_note_commitment_tree.root() && self.sprout_trees_by_anchor == other.sprout_trees_by_anchor && self.sapling_note_commitment_tree.root() == other.sapling_note_commitment_tree.root() && - self.sapling_trees_by_height== other.sapling_trees_by_height && + self.sapling_trees_by_height == other.sapling_trees_by_height && self.orchard_note_commitment_tree.root() == other.orchard_note_commitment_tree.root() && - self.orchard_trees_by_height== other.orchard_trees_by_height && + self.orchard_trees_by_height == other.orchard_trees_by_height && - // history tree - self.history_tree.as_ref().as_ref().map(|tree| tree.hash()) == other.history_tree.as_ref().as_ref().map(|other_tree| other_tree.hash()) && + // history trees + self.history_tree == other.history_tree && + self.history_trees_by_height == other.history_trees_by_height && // anchors self.sprout_anchors == other.sprout_anchors && @@ -752,6 +755,7 @@ impl Chain { partial_transparent_transfers: self.partial_transparent_transfers.clone(), partial_cumulative_work: self.partial_cumulative_work, history_tree, + history_trees_by_height: self.history_trees_by_height.clone(), chain_value_pools: self.chain_value_pools, } } @@ -836,6 +840,9 @@ impl Chain { orchard_root, )?; + self.history_trees_by_height + .insert(height, self.history_tree.clone()); + Ok(()) } @@ -1035,6 +1042,11 @@ impl UpdateWith for Chain { // This method is called on two scenarios: // - When popping the root: the history tree does not change. // - When popping the tip: the history tree is rebuilt in fork(). + // + // However, `history_trees_by_height` is reverted. + self.history_trees_by_height + .remove(&height) + .expect("History tree must be present if block was added to chain"); // for each transaction in block for (transaction, transaction_hash) in diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index fdf968acaa6..d9398d7411f 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -613,8 +613,9 @@ fn different_blocks_different_chains() -> Result<()> { chain1.orchard_note_commitment_tree = chain2.orchard_note_commitment_tree.clone(); chain1.orchard_trees_by_height = chain2.orchard_trees_by_height.clone(); - // history tree + // history trees chain1.history_tree = chain2.history_tree.clone(); + chain1.history_trees_by_height = chain2.history_trees_by_height.clone(); // anchors chain1.sprout_anchors = chain2.sprout_anchors.clone(); From 893f4950b3abe79859d51e4982feccdf08a107bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Aug 2022 06:59:51 +0000 Subject: [PATCH 31/42] build(deps): bump serde from 1.0.142 to 1.0.144 (#4925) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.142 to 1.0.144. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.142...v1.0.144) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
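For context: update PRs like this one are generated by Dependabot from a `.github/dependabot.yml` file in the repository. A minimal sketch of the kind of configuration that yields both the Cargo bumps and the GitHub Actions bumps in this series is shown below; the `directory` and `schedule` values are illustrative assumptions, not Zebra's actual settings.

```yaml
# Illustrative sketch of .github/dependabot.yml (assumed values, not Zebra's real config).
version: 2
updates:
  # Watches Cargo.toml / Cargo.lock and opens PRs like this serde bump.
  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "weekly"
  # Watches workflow files and opens PRs like the actionlint and changed-files bumps.
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
```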
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42aa7ac2bba..81a5c52be54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4334,9 +4334,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.142" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e590c437916fb6b221e1d00df6e3294f3fccd70ca7e92541c475d6ed6ef5fee2" +checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" dependencies = [ "serde_derive", ] @@ -4362,9 +4362,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.142" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b5b8d809babe02f538c2cfec6f2c1ed10804c0e5a6a041a049a4f5588ccc2e" +checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" dependencies = [ "proc-macro2 1.0.42", "quote 1.0.20", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 32c2997074b..ff5e568ef8b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -60,7 +60,7 @@ tracing = "0.1.31" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.142", features = ["serde_derive", "rc"] } +serde = { version = "1.0.144", features = ["serde_derive", "rc"] } serde_with = "2.0.0" serde-big-array = "0.4.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index c8e55223f77..ead0af4a0f9 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -23,7 +23,7 @@ dirs = "4.0.0" displaydoc = "0.2.3" lazy_static = "1.4.0" once_cell = "1.13.0" -serde = { version = "1.0.142", features = ["serde_derive"] } +serde = { version = "1.0.144", features = ["serde_derive"] } futures = "0.3.21" futures-util = "0.3.21" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index f9c7930e8ff..e346d58736b 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -26,7 +26,7 @@ pin-project = "1.0.10" rand = { version = "0.8.5", package = "rand" } rayon = "1.5.3" regex = "1.6.0" -serde = { version = "1.0.142", features = ["serde_derive"] } +serde = { version = "1.0.144", features = ["serde_derive"] } thiserror = "1.0.32" futures = "0.3.21" diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 6bd3b1ff83a..ce45de146e3 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -32,7 +32,7 @@ tracing = "0.1.31" tracing-futures = "0.2.5" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.142", features = ["serde_derive"] } +serde = { version = "1.0.144", features = ["serde_derive"] } proptest = { version = "0.10.1", optional = true } proptest-derive = { version = "0.3.0", optional = true } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 82df95dfcc0..d5fbf969712 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -22,7 +22,7 @@ mset = "0.1.0" regex = "1.6.0" rlimit = "0.8.3" rocksdb = { version = "0.18.0", default_features = false, features = ["lz4"] } -serde = { version = "1.0.142", features = ["serde_derive"] } +serde = { version = "1.0.144", features = ["serde_derive"] } tempfile = "3.3.0" thiserror = 
"1.0.32" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index ba068e3895f..60245303261 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -82,7 +82,7 @@ humantime = "2.1.0" humantime-serde = "1.1.1" indexmap = "1.9.1" lazy_static = "1.4.0" -serde = { version = "1.0.142", features = ["serde_derive"] } +serde = { version = "1.0.144", features = ["serde_derive"] } toml = "0.5.9" futures = "0.3.21" From d31c3c41770e018342e7aeb71127c0f2abb4d59b Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 Aug 2022 06:25:41 +1000 Subject: [PATCH 32/42] fix(consensus): Increase the number of blocks checked for legacy transactions (#4804) * Increase the number of legacy chain check blocks, and improve logging * Automatically adjust test message when MAX_LEGACY_CHAIN_BLOCKS changes --- zebra-state/src/constants.rs | 2 +- zebra-state/src/service.rs | 39 ++++++++++++++++---------------- zebra-state/src/service/check.rs | 18 +++++++++++---- zebra-state/src/service/tests.rs | 15 +++++++++--- 4 files changed, 47 insertions(+), 27 deletions(-) diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 956306201d5..f4cf6a431c2 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -22,7 +22,7 @@ pub const DATABASE_FORMAT_VERSION: u32 = 25; /// The maximum number of blocks to check for NU5 transactions, /// before we assume we are on a pre-NU5 legacy chain. -pub const MAX_LEGACY_CHAIN_BLOCKS: usize = 100; +pub const MAX_LEGACY_CHAIN_BLOCKS: usize = 1000; use lazy_static::lazy_static; use regex::Regex; diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 4c14fecf4c9..dd458615297 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -206,27 +206,28 @@ impl StateService { let timer = CodeTimer::start(); if let Some(tip) = state.best_tip() { - if let Some(nu5_activation_height) = NetworkUpgrade::Nu5.activation_height(network) { - if check::legacy_chain( - nu5_activation_height, - state.any_ancestor_blocks(tip.1), - state.network, - ) - .is_err() - { - let legacy_db_path = Some(state.disk.path().to_path_buf()); - panic!( - "Cached state contains a legacy chain. \ - An outdated Zebra version did not know about a recent network upgrade, \ - so it followed a legacy chain using outdated rules. \ - Hint: Delete your database, and restart Zebra to do a full sync. 
\ - Database path: {:?}", - legacy_db_path, - ); - } + let nu5_activation_height = NetworkUpgrade::Nu5 + .activation_height(network) + .expect("NU5 activation height is set"); + + if let Err(error) = check::legacy_chain( + nu5_activation_height, + state.any_ancestor_blocks(tip.1), + state.network, + ) { + let legacy_db_path = state.disk.path().to_path_buf(); + panic!( + "Cached state contains a legacy chain.\n\ + An outdated Zebra version did not know about a recent network upgrade,\n\ + so it followed a legacy chain using outdated consensus branch rules.\n\ + Hint: Delete your database, and restart Zebra to do a full sync.\n\ + Database path: {legacy_db_path:?}\n\ + Error: {error:?}", + ); } } - tracing::info!("no legacy chain found"); + + tracing::info!("cached state consensus branch is valid: no legacy chain found"); timer.finish(module_path!(), line!(), "legacy chain check"); (state, read_only_service, latest_chain_tip, chain_tip_change) diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index b3a43057a20..aeb7b8281b5 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -297,7 +297,10 @@ pub(crate) fn legacy_chain( where I: Iterator>, { - for (count, block) in ancestors.enumerate() { + let mut ancestors = ancestors.peekable(); + let tip_height = ancestors.peek().and_then(|block| block.coinbase_height()); + + for (index, block) in ancestors.enumerate() { // Stop checking if the chain reaches Canopy. We won't find any more V5 transactions, // so the rest of our checks are useless. // @@ -313,8 +316,13 @@ where // If we are past our NU5 activation height, but there are no V5 transactions in recent blocks, // the Zebra instance that verified those blocks had no NU5 activation height. - if count >= constants::MAX_LEGACY_CHAIN_BLOCKS { - return Err("giving up after checking too many blocks".into()); + if index >= constants::MAX_LEGACY_CHAIN_BLOCKS { + return Err(format!( + "could not find any transactions in recent blocks: \ + checked {index} blocks back from {:?}", + tip_height.expect("database contains valid blocks"), + ) + .into()); } // If a transaction `network_upgrade` field is different from the network upgrade calculated @@ -322,7 +330,9 @@ where // network upgrade heights. block .check_transaction_network_upgrade_consistency(network) - .map_err(|_| "inconsistent network upgrade found in transaction")?; + .map_err(|error| { + format!("inconsistent network upgrade found in transaction: {error:?}") + })?; // If we find at least one transaction with a valid `network_upgrade` field, the Zebra instance that // verified those blocks used the same network upgrade heights. (Up to this point in the chain.) diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index 73a0479a396..4b9e47fd87f 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -20,7 +20,8 @@ use zebra_test::{prelude::*, transcript::Transcript}; use crate::{ arbitrary::Prepare, - constants, init_test, + constants::{self, MAX_LEGACY_CHAIN_BLOCKS}, + init_test, service::{arbitrary::populated_state, chain_tip::TipAction, StateService}, tests::setup::{partial_nu5_chain_strategy, transaction_v4_from_coinbase}, BoxError, Config, FinalizedBlock, PreparedBlock, Request, Response, @@ -314,12 +315,20 @@ proptest! 
{ fn no_transaction_with_network_upgrade( (network, nu_activation_height, chain) in partial_nu5_chain_strategy(4, true, OVER_LEGACY_CHAIN_LIMIT, NetworkUpgrade::Canopy) ) { + let tip_height = chain + .last() + .expect("chain contains at least one block") + .coinbase_height() + .expect("chain contains valid blocks"); + let response = crate::service::check::legacy_chain(nu_activation_height, chain.into_iter().rev(), network) .map_err(|error| error.to_string()); prop_assert_eq!( response, - Err("giving up after checking too many blocks".into()) + Err(format!( + "could not find any transactions in recent blocks: checked {MAX_LEGACY_CHAIN_BLOCKS} blocks back from {tip_height:?}", + )) ); } @@ -356,7 +365,7 @@ proptest! { prop_assert_eq!( response, - Err("inconsistent network upgrade found in transaction".into()), + Err("inconsistent network upgrade found in transaction: WrongTransactionConsensusBranchId".into()), "first: {:?}, last: {:?}", chain.first().map(|block| block.coinbase_height()), chain.last().map(|block| block.coinbase_height()), From e744035ebddb34d07f62743592f04962ab722487 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Aug 2022 22:51:38 +0000 Subject: [PATCH 33/42] build(deps): bump reviewdog/action-actionlint from 1.28.0 to 1.29.0 (#4987) Bumps [reviewdog/action-actionlint](https://github.com/reviewdog/action-actionlint) from 1.28.0 to 1.29.0. - [Release notes](https://github.com/reviewdog/action-actionlint/releases) - [Commits](https://github.com/reviewdog/action-actionlint/compare/v1.28.0...v1.29.0) --- updated-dependencies: - dependency-name: reviewdog/action-actionlint dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index edd516ca4a6..66a133b0696 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -145,7 +145,7 @@ jobs: if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - uses: actions/checkout@v3.0.2 - - uses: reviewdog/action-actionlint@v1.28.0 + - uses: reviewdog/action-actionlint@v1.29.0 with: level: warning fail_on_error: false From fc7d0db83f5d022a5c5c3e677b11fa939ac4aae6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Aug 2022 22:51:58 +0000 Subject: [PATCH 34/42] build(deps): bump tj-actions/changed-files from 29.0.1 to 29.0.2 (#4985) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 29.0.1 to 29.0.2. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v29.0.1...v29.0.2) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 66a133b0696..254fe14cd83 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v29.0.1 + uses: tj-actions/changed-files@v29.0.2 with: files: | **/*.rs @@ -44,7 +44,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v29.0.1 + uses: tj-actions/changed-files@v29.0.2 with: files: | .github/workflows/*.yml From e5c0e108879413c80ce8c4847599087bc182f05e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Aug 2022 22:52:29 +0000 Subject: [PATCH 35/42] build(deps): bump actions/github-script from 6.1.0 to 6.2.0 (#4986) Bumps [actions/github-script](https://github.com/actions/github-script) from 6.1.0 to 6.2.0. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/v6.1.0...v6.2.0) --- updated-dependencies: - dependency-name: actions/github-script dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/continous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 1d34417664d..3b14683cb49 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -44,7 +44,7 @@ jobs: steps: - name: Getting Zebrad Version id: get - uses: actions/github-script@v6.1.0 + uses: actions/github-script@v6.2.0 with: result-encoding: string script: | From c85e482fa0c1bf4a24a34d4af2193d6535b1b024 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 29 Aug 2022 20:11:05 -0400 Subject: [PATCH 36/42] ci(concurrency)!: run a single CI workflow as required (#4981) * ci(concurrency)!: run a single CI workflow as required Previous behavior: Multiple Mainnet full syncs were able to run on the main branch at the same time, and pushing multiple commits to the same branch would run multiple CI workflows, when only the run from last commit was relevant Expected behavior: Ensure that only a single CI workflow runs at the same time in PRs. The latest commit should cancel any previous running workflows from the same PR. 
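Concretely, the stanza this change adds at the top of most workflows keys concurrent runs by workflow name plus PR branch, and falls back to the unique run ID outside pull requests (where `github.head_ref` is empty), so non-PR runs never share a group. The exact groups are in the diffs below:

```yaml
concurrency:
  # PRs share one group per branch, so a newer commit cancels the older run;
  # other events fall back to the unique run ID and are never cancelled.
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
```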
Solution: Use GitHub actions concurrency feature https://docs.github.com/en/actions/using-jobs/using-concurrency Fixes https://github.com/ZcashFoundation/zebra/issues/4977 Fixes https://github.com/ZcashFoundation/zebra/issues/4857 * docs: typo * ci(concurrency): do not cancel running full syncs Co-authored-by: teor * fix(concurrency): explain the behavior better & add new ones Co-authored-by: teor --- .github/workflows/build-crates-individually.yml | 7 +++++++ .github/workflows/continous-delivery.yml | 7 +++++++ .../workflows/continous-integration-docker.yml | 15 +++++++++++++++ .github/workflows/continous-integration-os.yml | 7 +++++++ .github/workflows/coverage.yml | 7 +++++++ .github/workflows/docs.yml | 7 +++++++ .github/workflows/lint.yml | 7 +++++++ .github/workflows/zcash-params.yml | 7 +++++++ 8 files changed, 64 insertions(+) diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 57b25707fe1..8dd84409547 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -1,5 +1,12 @@ name: Build crates individually +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + on: workflow_dispatch: push: diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 3b14683cb49..7414e8d3613 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -1,5 +1,12 @@ name: CD +# Ensures that only one workflow task will run at a time. Previous deployments, if +# already in process, won't get cancelled. Instead, we let the first to complete +# then queue the latest pending workflow, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + on: workflow_dispatch: inputs: diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 6fc73bf7773..df9656813c7 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -1,5 +1,12 @@ name: CI Docker +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + on: workflow_dispatch: inputs: @@ -312,6 +319,14 @@ jobs: saves_to_disk: true disk_suffix: tip height_grep_text: 'current_height.*=.*Height' + # We don't want to cancel running full syncs on `main` if a new PR gets merged, + # because we might never finish a full sync during busy weeks. Instead, we let the + # first sync complete, then queue the latest pending sync, cancelling any syncs in between. + # (As the general workflow concurrency group just gets matched in Pull Requests, + # it has no impact on this job.) + concurrency: + group: github.workflow−{{ github.ref }} + cancel-in-progress: false # Test that Zebra can sync to the chain tip, using a cached Zebra tip state, # without launching `lightwalletd`. 
diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 0bf90b8daab..b89400f5445 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -1,5 +1,12 @@ name: CI OSes +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + on: workflow_dispatch: # we build Rust and Zcash parameter caches on main, diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 230afd6847e..b0e8ebea558 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -1,5 +1,12 @@ name: Coverage +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + on: workflow_dispatch: push: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ddff8841fd7..c797c92e7eb 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,5 +1,12 @@ name: Docs +# Ensures that only one workflow task will run at a time. Previous deployments, if +# already in process, won't get cancelled. Instead, we let the first to complete +# then queue the latest pending workflow, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + on: workflow_dispatch: push: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 254fe14cd83..42e3484fd10 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,12 @@ name: Lint +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + on: # we build Rust caches on main, so they can be shared by all branches: # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/zcash-params.yml index b2f59fb0b71..e283c744fa6 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/zcash-params.yml @@ -1,5 +1,12 @@ name: zcash-params +# Ensures that only one workflow task will run at a time. Previous deployments, if +# already in process, won't get cancelled. 
Instead, we let the first to complete +# then queue the latest pending workflow, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + on: workflow_dispatch: push: From c18867816987b7180e2f8d18e223d089a78b3a8b Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 Aug 2022 12:39:34 +1000 Subject: [PATCH 37/42] Revert: deserialize stored transactions in a rayon thread (#4933) * Revert: deserialize stored transactions in a rayon thread * Add a TODO for the reverted bug fix Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../finalized_state/disk_format/block.rs | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_format/block.rs b/zebra-state/src/service/finalized_state/disk_format/block.rs index 939408c7857..4ce8689ed3d 100644 --- a/zebra-state/src/service/finalized_state/disk_format/block.rs +++ b/zebra-state/src/service/finalized_state/disk_format/block.rs @@ -231,21 +231,12 @@ impl FromDisk for Transaction { fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { let bytes = bytes.as_ref(); - let mut tx = None; - - // # Performance - // - // Move CPU-intensive deserialization cryptography into the rayon thread pool. - // This avoids blocking the tokio executor. - rayon::in_place_scope_fifo(|scope| { - scope.spawn_fifo(|_scope| { - tx = Some(bytes.as_ref().zcash_deserialize_into().expect( - "deserialization format should match the serialization format used by IntoDisk", - )); - }); - }); - - tx.expect("scope has already run") + // TODO: skip cryptography verification during transaction deserialization from storage, + // or do it in a rayon thread (ideally in parallel with other transactions) + bytes + .as_ref() + .zcash_deserialize_into() + .expect("deserialization format should match the serialization format used by IntoDisk") } } From ecf2d80175055cdd27c7a8d5354a9671212f4e9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Aug 2022 05:14:08 +0000 Subject: [PATCH 38/42] build(deps): bump futures from 0.3.21 to 0.3.23 (#4913) * build(deps): bump futures from 0.3.21 to 0.3.23 Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.21 to 0.3.23. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.21...0.3.23) --- updated-dependencies: - dependency-name: futures dependency-type: direct:production update-type: version-update:semver-patch ... 
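For reference, `futures` 0.3.23 deprecates `FuturesOrdered::push` in favor of `push_back` (and adds `push_front` for the other end), which is why the `tower-batch` ed25519 test below switches calls. A minimal self-contained sketch of the renamed API (illustrative only, not Zebra code):

```rust
use futures::executor::block_on;
use futures::stream::{FuturesOrdered, StreamExt};

fn main() {
    let mut ordered = FuturesOrdered::new();
    for i in 0..3u32 {
        // Previously `ordered.push(...)`; `push_back` has the same
        // append-at-the-end behavior, and results still come back in insertion order.
        ordered.push_back(async move { i * 2 });
    }
    let results: Vec<u32> = block_on(ordered.collect());
    assert_eq!(results, vec![0, 2, 4]);
}
```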
Signed-off-by: dependabot[bot] * updated deprecated FuturesOrdered::push to push_back Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: arya2 --- Cargo.lock | 32 ++++++++++++++++---------------- tower-batch/Cargo.toml | 2 +- tower-batch/tests/ed25519.rs | 2 +- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81a5c52be54..72d36cf7d7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1652,9 +1652,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1667,9 +1667,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1683,9 +1683,9 @@ checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1694,15 +1694,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.42", "quote 1.0.20", @@ -1711,21 +1711,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = 
"f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures-channel", "futures-core", diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml index 397b3b0fc96..47c2b4ec274 100644 --- a/tower-batch/Cargo.toml +++ b/tower-batch/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" edition = "2021" [dependencies] -futures = "0.3.21" +futures = "0.3.23" futures-core = "0.3.23" pin-project = "1.0.10" rayon = "1.5.3" diff --git a/tower-batch/tests/ed25519.rs b/tower-batch/tests/ed25519.rs index e8ae607c1ae..c45e196d2f1 100644 --- a/tower-batch/tests/ed25519.rs +++ b/tower-batch/tests/ed25519.rs @@ -37,7 +37,7 @@ where }; verifier.ready().await?; - results.push(span.in_scope(|| verifier.call((vk_bytes, sig, msg).into()))) + results.push_back(span.in_scope(|| verifier.call((vk_bytes, sig, msg).into()))) } let mut numbered_results = results.enumerate(); diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index ff5e568ef8b..7870d095e48 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -65,7 +65,7 @@ serde_with = "2.0.0" serde-big-array = "0.4.1" # Processing -futures = "0.3.21" +futures = "0.3.23" itertools = "0.10.3" rayon = "1.5.3" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index ead0af4a0f9..5800a84cb37 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -25,7 +25,7 @@ lazy_static = "1.4.0" once_cell = "1.13.0" serde = { version = "1.0.144", features = ["serde_derive"] } -futures = "0.3.21" +futures = "0.3.23" futures-util = "0.3.21" metrics = "0.18.1" thiserror = "1.0.32" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e346d58736b..5f3776e94c7 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -29,7 +29,7 @@ regex = "1.6.0" serde = { version = "1.0.144", features = ["serde_derive"] } thiserror = "1.0.32" -futures = "0.3.21" +futures = "0.3.23" tokio = { version = "1.20.1", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.9", features = ["sync", "time"] } tokio-util = { version = "0.7.3", features = ["codec"] } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index ce45de146e3..b2203e8197f 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -13,7 +13,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "ze [dependencies] chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } -futures = "0.3.21" +futures = "0.3.23" # lightwalletd sends JSON-RPC requests over HTTP 1.1 hyper = { version = "0.14.20", features = ["http1", "server"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index d5fbf969712..44557e2fd41 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -13,7 +13,7 @@ bincode = "1.3.3" chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } dirs = "4.0.0" displaydoc = "0.2.3" -futures = "0.3.21" +futures = "0.3.23" hex = "0.4.3" itertools = "0.10.3" lazy_static = "1.4.0" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index a68db9466dd..3d1d9384792 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -19,7 +19,7 @@ regex = "1.6.0" tokio = { version = "1.20.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } -futures = "0.3.21" +futures = "0.3.23" color-eyre = "0.6.1" # This is a transitive dependency via color-eyre. 
diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 60245303261..1e3f72cf5e0 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -85,7 +85,7 @@ lazy_static = "1.4.0" serde = { version = "1.0.144", features = ["serde_derive"] } toml = "0.5.9" -futures = "0.3.21" +futures = "0.3.23" rayon = "1.5.3" tokio = { version = "1.20.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } From 3ff56c22cda4504832d4fe57fd6357e8743965ed Mon Sep 17 00:00:00 2001 From: Arya Date: Tue, 30 Aug 2022 05:01:33 -0400 Subject: [PATCH 39/42] adds start as default subcommand for zebrad (#4957) * adds start as default subcommand for zebrad * moves EntryPoint to submodule and adds a test * moves all start tests to config_test to avoid listener conflicts * Update zebrad/src/application/entry_point.rs docs * Revert "moves all start tests to config_test to avoid listener conflicts" This reverts commit 61ce46f5a13907facc3a11326e7a328d81b2be3d. * Update based on test API changes from another PR Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebrad/Cargo.toml | 2 +- zebrad/src/application.rs | 7 +- zebrad/src/application/entry_point.rs | 104 ++++++++++++++++++++++++++ zebrad/src/commands.rs | 4 +- zebrad/src/commands/start.rs | 2 +- zebrad/tests/acceptance.rs | 46 ++++++++---- 6 files changed, 145 insertions(+), 20 deletions(-) create mode 100644 zebrad/src/application/entry_point.rs diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 1e3f72cf5e0..5fc2ccb8fba 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -76,7 +76,7 @@ zebra-rpc = { path = "../zebra-rpc" } zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" -gumdrop = "0.7" +gumdrop = { version = "0.7", features = ["default_expr"]} chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } humantime = "2.1.0" humantime-serde = "1.1.1" diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index a3e788449ef..6ebd51c987b 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -1,5 +1,8 @@ //! Zebrad Abscissa Application +mod entry_point; +use self::entry_point::EntryPoint; + use std::{fmt::Write as _, io::Write as _, process}; use abscissa_core::{ @@ -7,7 +10,7 @@ use abscissa_core::{ config::{self, Configurable}, status_err, terminal::{component::Terminal, stderr, stdout, ColorChoice}, - Application, Component, EntryPoint, FrameworkError, Shutdown, StandardPaths, Version, + Application, Component, FrameworkError, Shutdown, StandardPaths, Version, }; use zebra_network::constants::PORT_IN_USE_ERROR; @@ -138,7 +141,7 @@ impl Default for ZebradApp { impl Application for ZebradApp { /// Entrypoint command for this application. - type Cmd = EntryPoint; + type Cmd = EntryPoint; /// Application configuration. type Cfg = ZebradConfig; diff --git a/zebrad/src/application/entry_point.rs b/zebrad/src/application/entry_point.rs new file mode 100644 index 00000000000..bbcff6cb8e7 --- /dev/null +++ b/zebrad/src/application/entry_point.rs @@ -0,0 +1,104 @@ +//! Zebrad EntryPoint + +use crate::{ + commands::{StartCmd, ZebradCmd}, + config::ZebradConfig, +}; + +use std::path::PathBuf; + +use abscissa_core::{ + command::{Command, Usage}, + config::Configurable, + FrameworkError, Options, Runnable, +}; + +// (See https://docs.rs/abscissa_core/0.5.2/src/abscissa_core/command/entrypoint.rs.html) +/// Toplevel entrypoint command. 
+/// +/// Handles obtaining toplevel help as well as verbosity settings. +#[derive(Debug, Options)] +pub struct EntryPoint { + /// Path to the configuration file + #[options(short = "c", help = "path to configuration file")] + pub config: Option, + + /// Obtain help about the current command + #[options(short = "h", help = "print help message")] + pub help: bool, + + /// Increase verbosity setting + #[options(short = "v", help = "be verbose")] + pub verbose: bool, + + /// Subcommand to execute. + /// + /// The `command` option will delegate option parsing to the command type, + /// starting at the first free argument. Defaults to start. + #[options(command, default_expr = "Some(ZebradCmd::Start(StartCmd::default()))")] + pub command: Option, +} + +impl EntryPoint { + /// Borrow the underlying command type + fn command(&self) -> &ZebradCmd { + self.command + .as_ref() + .expect("Some(ZebradCmd::Start(StartCmd::default()) as default value") + } +} + +impl Runnable for EntryPoint { + fn run(&self) { + self.command().run() + } +} + +impl Command for EntryPoint { + /// Name of this program as a string + fn name() -> &'static str { + ZebradCmd::name() + } + + /// Description of this program + fn description() -> &'static str { + ZebradCmd::description() + } + + /// Version of this program + fn version() -> &'static str { + ZebradCmd::version() + } + + /// Authors of this program + fn authors() -> &'static str { + ZebradCmd::authors() + } + + /// Get usage information for a particular subcommand (if available) + fn subcommand_usage(command: &str) -> Option { + ZebradCmd::subcommand_usage(command) + } +} + +impl Configurable for EntryPoint { + /// Path to the command's configuration file + fn config_path(&self) -> Option { + match &self.config { + // Use explicit `-c`/`--config` argument if passed + Some(cfg) => Some(cfg.clone()), + + // Otherwise defer to the toplevel command's config path logic + None => self.command.as_ref().and_then(|cmd| cmd.config_path()), + } + } + + /// Process the configuration after it has been loaded, potentially + /// modifying it or returning an error if options are incompatible + fn process_config(&self, config: ZebradConfig) -> Result { + match &self.command { + Some(cmd) => cmd.process_config(config), + None => Ok(config), + } + } +} diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index 9dcc2745b5b..c53d177d2b9 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -9,10 +9,12 @@ mod version; use self::ZebradCmd::*; use self::{ - copy_state::CopyStateCmd, download::DownloadCmd, generate::GenerateCmd, start::StartCmd, + copy_state::CopyStateCmd, download::DownloadCmd, generate::GenerateCmd, tip_height::TipHeightCmd, version::VersionCmd, }; +pub use self::start::StartCmd; + use crate::config::ZebradConfig; use abscissa_core::{ diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index 9c792e340d2..b22a82df7ce 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -90,7 +90,7 @@ use crate::{ }; /// `start` subcommand -#[derive(Command, Debug, Options)] +#[derive(Command, Debug, Options, Default)] pub struct StartCmd { /// Filter strings which override the config file and defaults #[options(free, help = "tracing filters which override the zebrad.toml config")] diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 13718fd00d7..79249ea1db9 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -446,21 +446,6 @@ fn ephemeral(cache_dir_config: EphemeralConfig, 
cache_dir_check: EphemeralCheck) Ok(()) } -#[test] -fn app_no_args() -> Result<()> { - let _init_guard = zebra_test::init(); - - let testdir = testdir()?.with_config(&mut default_test_config()?)?; - - let child = testdir.spawn_child(args![])?; - let output = child.wait_with_output()?; - let output = output.assert_success()?; - - output.stdout_line_contains("USAGE:")?; - - Ok(()) -} - #[test] fn version_no_args() -> Result<()> { let _init_guard = zebra_test::init(); @@ -517,6 +502,37 @@ fn config_test() -> Result<()> { // Check that an older stored configuration we have for Zebra works stored_config_works()?; + // Runs `zebrad` serially to avoid potential port conflicts + app_no_args()?; + + Ok(()) +} + +/// Test that `zebrad` runs the start command with no args +fn app_no_args() -> Result<()> { + let _init_guard = zebra_test::init(); + + // start caches state, so run one of the start tests with persistent state + let testdir = testdir()?.with_config(&mut persistent_test_config()?)?; + + let mut child = testdir.spawn_child(args![])?; + + // Run the program and kill it after a few seconds + std::thread::sleep(LAUNCH_DELAY); + child.kill(true)?; + + let output = child.wait_with_output()?; + let output = output.assert_failure()?; + + output.stdout_line_contains("Starting zebrad")?; + + // Make sure the command passed the legacy chain check + output.stdout_line_contains("starting legacy chain check")?; + output.stdout_line_contains("no legacy chain found")?; + + // Make sure the command was killed + output.assert_was_killed()?; + Ok(()) } From 6dfaa73969e78c0904c38bd09d421da6db00fe86 Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 30 Aug 2022 22:09:26 +1000 Subject: [PATCH 40/42] Stop cancelling manual full syncs (#5000) --- .github/workflows/continous-integration-docker.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index df9656813c7..abbf1e8a691 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -324,8 +324,12 @@ jobs: # first sync complete, then queue the latest pending sync, cancelling any syncs in between. # (As the general workflow concurrency group just gets matched in Pull Requests, # it has no impact on this job.) 
+    #
+    # TODO:
+    # - allow multiple manual syncs on a branch, and isolate manual syncs from automatic syncs, by adding '-${{ github.run_id }}' when github.event.inputs.run-full-sync is true
+    # - stop multiple automatic full syncs across different PRs by removing '−${{ github.ref }}' when needs.get-available-disks.outputs.zebra_tip_disk is true
     concurrency:
-      group: github.workflow−{{ github.ref }}
+      group: ${{ github.workflow }}−${{ github.ref }}
       cancel-in-progress: false

   # Test that Zebra can sync to the chain tip, using a cached Zebra tip state,

From e9597c0406f13c01e60327089d5828ab7c0c1f7a Mon Sep 17 00:00:00 2001
From: teor
Date: Tue, 30 Aug 2022 23:42:17 +1000
Subject: [PATCH 41/42] Split a long full sync job (#5001)

---
 .github/workflows/deploy-gcp-tests.yml | 65 +++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml
index 3aaed05cef3..e205b485f13 100644
--- a/.github/workflows/deploy-gcp-tests.yml
+++ b/.github/workflows/deploy-gcp-tests.yml
@@ -755,7 +755,7 @@ jobs:
   # follow the logs of the test we just launched, up to block 1,760,000 or later
   # (or the test finishing)
   #
-  # We chose this height because it was about 9 hours into the NU5 sync, at the end of August 2022.
+  # We chose this height because it was about 8 hours into the NU5 sync, at the end of August 2022.
   logs-1760k:
     name: Log ${{ inputs.test_id }} test (1760k)
     needs: [ logs-1740k ]
@@ -813,10 +813,71 @@
+  # follow the logs of the test we just launched, up to block 1,780,000 or later
+  # (or the test finishing)
+  #
+  # We chose this height because it was about 12 hours into the NU5 sync, at the end of August 2022.
+  logs-1780k:
+    name: Log ${{ inputs.test_id }} test (1780k)
+    needs: [ logs-1760k ]
+    # If the previous job fails, we still want to show the logs.
+ if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + retries: '3' + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Show recent logs, following until block 1,780,000 (or the test finishes) + - name: Show logs for ${{ inputs.test_id }} test (1780k) + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'estimated progress.*current_height.*=.*17[8-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ + " + # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing) logs-checkpoint: name: Log ${{ inputs.test_id }} test (checkpoint) - needs: [ logs-1760k ] + needs: [ logs-1780k ] # If the previous job fails, we still want to show the logs. if: ${{ !cancelled() }} runs-on: ubuntu-latest From c322533125562b9ff74651e7666545cda23cc9ad Mon Sep 17 00:00:00 2001 From: Marek Date: Thu, 1 Sep 2022 00:00:46 +0200 Subject: [PATCH 42/42] v1.0.0-beta.14-release (#5006) * Bump crate versions * Bump `USER_AGENT` * Update `README.md` * Update `install.md` * Update `CHANGELOG.md` * Apply suggestions from code review Co-authored-by: teor * Apply a suggestion from code review Co-authored-by: teor * Move the "Networking" section above "Tests" * Merge PRs that update checkpoints * Refactor `CHANGELOG.md` * Add missing PRs to `CHANGELOG.md` * Apply suggestions from code review Co-authored-by: teor Co-authored-by: teor --- CHANGELOG.md | 98 ++++++++++++++++++++++++++++++++++ Cargo.lock | 24 ++++----- README.md | 46 +++++++--------- book/src/user/install.md | 13 +++-- tower-batch/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 2 +- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-network/src/constants.rs | 2 +- zebra-node-services/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-script/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 17 files changed, 151 insertions(+), 56 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efb2641103b..89e1c01b0ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,104 @@ All notable changes to Zebra are documented in this file. 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org).
+## [Zebra 1.0.0-beta.14](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.14) - 2022-08-30
+
+This release contains a variety of CI fixes, test fixes and dependency updates.
+It contains two breaking changes:
+
+- the recommended disk capacity for Zebra is now 300 GB, and the recommended network bandwidth is 100 GB per month, and
+- when no command is provided on the command line, `zebrad` automatically starts syncing (like `zcashd`).
+
+The sync performance of `lightwalletd` is also improved.
+
+### Added
+
+- Store history trees by height in the non-finalized state (#4928)
+- Breaking: Add `start` as default subcommand for `zebrad` (#4957)
+
+### Changed
+
+- Fix a performance regression when serving blocks via the Zcash network protocol and RPCs (#4933)
+- Update block hash checkpoints for mainnet (#4919, #4972)
+- Enable a `tinyvec` feature to speed up compilation (#4796)
+- Split the `zebra_state::service::read` module (#4827)
+- Disallow Orchard `ivk = 0` on `IncomingViewingKey::from` & `SpendingKey` generation (#3962)
+
+#### Docs
+
+- Increase disk and network requirements for long-term deployment (#4948, #4963)
+- Update supported Rust versions in `README.md` (#4938)
+- Document edge cases in sync workflows (#4973)
+- Add missing status badges & sections (#4817)
+
+#### Rust Dependencies
+
+- Bump `serde` from 1.0.137 to 1.0.144 (#4865, #4876, #4925)
+- Bump `serde_json` from 1.0.81 to 1.0.83 (#4727, #4877)
+- Bump `serde_with` from 1.14.0 to 2.0.0 (#4785)
+- Bump `futures` from 0.3.21 to 0.3.23 (#4913)
+- Bump `futures-core` from 0.3.21 to 0.3.23 (#4915)
+- Bump `chrono` from 0.4.19 to 0.4.20 (#4898)
+- Bump `criterion` from 0.3.5 to 0.3.6 (#4761)
+- Bump `thiserror` from 1.0.31 to 1.0.32 (#4878)
+- Bump `vergen` from 7.2.1 to 7.3.2 (#4890)
+- Bump `tinyvec` from 1.5.1 to 1.6.0 (#4888)
+- Bump `insta` from 1.15.0 to 1.17.1 (#4884)
+- Bump `semver` from 1.0.12 to 1.0.13 (#4879)
+- Bump `bytes` from 1.1.0 to 1.2.1 (#4843)
+- Bump `tokio` from 1.20.0 to 1.20.1 (#4864)
+- Bump `hyper` from 0.14.19 to 0.14.20 (#4764)
+- Bump `once_cell` from 1.12.0 to 1.13.0 (#4749)
+- Bump `regex` from 1.5.6 to 1.6.0 (#4755)
+- Bump `inferno` from 0.11.6 to 0.11.7 (#4829)
+
+#### CI Dependencies
+
+- Bump `actions/github-script` from 6.1.0 to 6.2.0 (#4986)
+- Bump `reviewdog/action-actionlint` from 1.27.0 to 1.29.0 (#4923, #4987)
+- Bump `tj-actions/changed-files` from 24 to 29.0.2 (#4936, #4959, #4985)
+- Bump `w9jds/firebase-action` from 2.2.2 to 11.5.0 (#4905)
+- Bump `docker/build-push-action` from 3.0.0 to 3.1.1 (#4797, #4895)
+
+### Fixed
+
+- Increase the number of blocks checked for legacy transactions (#4804)
+
+#### CI
+
+- Split a long full sync job (#5001)
+- Stop cancelling manual full syncs (#5000)
+- Run a single CI workflow as required (#4981)
+- Fix some clippy warnings (#4927, #4931)
+- Improve Zebra acceptance test diagnostics (#4958)
+- Expand cached state disks before running tests (#4962)
+- Increase full sync timeouts for longer syncs (#4961)
+- Fix a regular expression typo in a full sync job (#4950)
+- Write cached state images after update syncs, and use the latest image from any commit (#4949)
+- Increase CI disk size to 200GB (#4945)
+- Make sure Rust tests actually run in `deploy-gcp-tests.yml` (#4710)
+- Copy lightwalletd from the correct path during Docker builds (#4886)
+- Use FHS for deployments and
artifacts (#4786) +- Retry gcloud authentication if it fails (#4940) +- Disable beta Rust tests and add parameter download logging (#4930) +- Do not run versioning job when pushing to main (#4970) +- Deploy long running node instances on release (#4939) +- Run build and test jobs on cargo and clippy config changes (#4941) +- Increase Mergify batch sizes (#4947) + +#### Networking + +- Send height to peers (#4904) +- Fix handshake timing and error handling (#4772) + +#### Tests + +- Show full Zebra test panic details in CI logs (#4942) +- Update timeout for Zebra sync tests (#4918) +- Improve test reliability and performance (#4869) +- Use `FuturesOrdered` in `fallback_verification` test (#4867) +- Skip some RPC tests when `ZEBRA_SKIP_NETWORK_TESTS` is set (#4849) +- Truncate the number of transactions in send transaction test (#4848) ## [Zebra 1.0.0-beta.13](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-beta.13) - 2022-07-29 diff --git a/Cargo.lock b/Cargo.lock index 72d36cf7d7e..bcceb62b672 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5453,7 +5453,7 @@ dependencies = [ [[package]] name = "tower-batch" -version = "0.2.28" +version = "0.2.29" dependencies = [ "color-eyre", "ed25519-zebra", @@ -5477,7 +5477,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.28" +version = "0.2.29" dependencies = [ "futures-core", "pin-project 0.4.29", @@ -6307,7 +6307,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "aes", "bech32", @@ -6371,7 +6371,7 @@ version = "1.0.0-beta.0" [[package]] name = "zebra-consensus" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "bellman", "blake2b_simd 1.0.0", @@ -6415,7 +6415,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "arti-client", "bitflags", @@ -6453,14 +6453,14 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "zebra-chain", ] [[package]] name = "zebra-rpc" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "chrono", "futures", @@ -6489,7 +6489,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "displaydoc", "hex", @@ -6502,7 +6502,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "bincode", "chrono", @@ -6539,7 +6539,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "color-eyre", "futures", @@ -6566,7 +6566,7 @@ dependencies = [ [[package]] name = "zebra-utils" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "color-eyre", "hex", @@ -6582,7 +6582,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0-beta.13" +version = "1.0.0-beta.14" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index fe669b66f89..85470f0a774 100644 --- a/README.md +++ b/README.md @@ -79,14 +79,14 @@ install mechanism. To run `zebrad`, follow the instructions to compile `zebrad` for your platform: 1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install). - - Zebra is tested with the latest `stable` Rust version. - Earlier versions are not supported or tested. - Any Zebra release can remove support for older Rust versions, without any notice. 
- (Rust 1.59 and earlier are definitely not supported, due to missing features.) + - Zebra is tested with the latest `stable` Rust version. + Earlier versions are not supported or tested. + Any Zebra release can remove support for older Rust versions, without any notice. + (Rust 1.59 and earlier are definitely not supported, due to missing features.) 2. Install Zebra's build dependencies: - - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager - - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` -3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.13 zebrad` + - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager + - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC` +3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.14 zebrad` 4. Run `zebrad start` (see [Running Zebra](https://zebra.zfnd.org/user/run.html) for more information) For more detailed instructions, refer to the [documentation](https://zebra.zfnd.org/user/install.html). @@ -96,6 +96,7 @@ For more detailed instructions, refer to the [documentation](https://zebra.zfnd. For performance reasons, some debugging and monitoring features are disabled in release builds. You can [enable these features](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags) using: + ```sh cargo install --features= ... ``` @@ -103,18 +104,21 @@ cargo install --features= ... ### System Requirements The recommended requirements for compiling and running `zebrad` are: + - 4 CPU cores - 16 GB RAM - 300 GB available disk space for building binaries and storing cached chain state -- 100 Mbps network connection, with 300 GB of uploads and downloads per month +- 100 Mbps network connection, with 300 GB of uploads and downloads per month We continuously test that our builds and tests pass on: -The *latest* [GitHub Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) for: +The _latest_ [GitHub Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) for: + - macOS - Ubuntu Docker: + - Debian Bullseye Zebra's tests can take over an hour, depending on your machine. @@ -142,6 +146,7 @@ macOS records these panics as crash reports. 
@@ -96,6 +96,7 @@ For more detailed instructions, refer to the [documentation](https://zebra.zfnd.
 
 For performance reasons, some debugging and monitoring features are disabled in release builds. You can
 [enable these features](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags) using:
+
 ```sh
 cargo install --features= ...
 ```
@@ -103,18 +104,21 @@ cargo install --features= ...
 ### System Requirements
 
 The recommended requirements for compiling and running `zebrad` are:
+
 - 4 CPU cores
 - 16 GB RAM
 - 300 GB available disk space for building binaries and storing cached chain state
-- 100 Mbps network connection, with 300 GB of uploads and downloads per month
+- 100 Mbps network connection, with 300 GB of uploads and downloads per month
 
 We continuously test that our builds and tests pass on:
 
-The *latest* [GitHub Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) for:
+The _latest_ [GitHub Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) for:
+
 - macOS
 - Ubuntu
 
 Docker:
+
 - Debian Bullseye
 
 Zebra's tests can take over an hour, depending on your machine.
@@ -142,6 +146,7 @@ macOS records these panics as crash reports.
 If you are seeing "Crash Reporter" dialogs during Zebra tests,
 you can disable them using this Terminal.app command:
+
 ```sh
 defaults write com.apple.CrashReporter DialogType none
 ```
 
@@ -149,6 +154,7 @@ defaults write com.apple.CrashReporter DialogType none
 ### Network Ports and Data Usage
 
 By default, Zebra uses the following inbound TCP listener ports:
+
 - 8233 on Mainnet
 - 18233 on Testnet
 
@@ -157,6 +163,7 @@ If this is a problem for you, please
 [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose)
 
 `zebrad`'s typical mainnet network usage is:
+
 - Initial sync: 100 GB download, we expect the initial download to grow to hundreds of gigabytes over time
 - Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests
@@ -192,37 +199,24 @@ So Zebra's state should always be valid, unless your OS or disk hardware is corr
 ## Known Issues
 
 There are a few bugs in Zebra that we're still working on fixing:
+
 - No Windows support [#3801](https://github.com/ZcashFoundation/zebra/issues/3801)
   - We used to test with Windows Server 2019, but not anymore; see issue for details
 
 ### Performance
 
-We are working on improving Zebra performance, the following are known issues:
-- Send note commitment and history trees from the non-finalized state to the finalized state [#4824](https://github.com/ZcashFoundation/zebra/issues/4824)
-- Speed up opening the database [#4822](https://github.com/ZcashFoundation/zebra/issues/4822)
-- Revert note commitment and history trees when forking non-finalized chains [#4794](https://github.com/ZcashFoundation/zebra/issues/4794)
-- Store only the first tree state in each identical series of tree states [#4784](https://github.com/ZcashFoundation/zebra/issues/4784)
-
-RPCs might also be slower than they used to be, we need to check:
 - Revert deserializing state transactions in rayon threads [#4831](https://github.com/ZcashFoundation/zebra/issues/4831)
 
-Ongoing investigations:
-- Find out which parts of CommitBlock/CommitFinalizedBlock are slow [#4823](https://github.com/ZcashFoundation/zebra/issues/4823)
-- Mini-Epic: Stop tokio tasks running for a long time and blocking other tasks [#4747](https://github.com/ZcashFoundation/zebra/issues/4747)
-- Investigate busiest tasks per tokio-console [#4583](https://github.com/ZcashFoundation/zebra/issues/4583)
-
 ## Future Work
 
-Features:
-- Wallet functionality
-
 Performance and Reliability:
+
 - Reliable syncing under poor network conditions
 - Additional batch verification
 - Performance tuning
 
 Currently, the following features are out of scope:
-- Mining support
+
 - Optional Zcash network protocol messages
 - Consensus rules removed before Canopy activation
   (Zebra checkpoints on Canopy activation)
@@ -230,7 +224,7 @@ Currently, the following features are out of scope:
 
 The [Zebra website](https://zebra.zfnd.org/) contains user documentation, such
 as how to run or configure Zebra, set up metrics integrations, etc., as well as
-developer documentation, such as design documents. We also render [API
+developer documentation, such as design documents. We also render [API
 documentation](https://doc.zebra.zfnd.org) for the external API of our crates,
 as well as [internal documentation](https://doc-internal.zebra.zfnd.org) for
 private APIs.
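The README hunks above list Zebra's default inbound TCP listener ports: 8233 on Mainnet and 18233 on Testnet. A minimal reachability sketch, assuming a local node is already running and `nc` (netcat) is available; this is an illustration, not part of the patch:

```sh
# Probe the default Mainnet listener port; use 18233 when running on Testnet.
nc -z -v localhost 8233
```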
diff --git a/book/src/user/install.md b/book/src/user/install.md
index 7ce8fafbb8c..c72dd01bf0f 100644
--- a/book/src/user/install.md
+++ b/book/src/user/install.md
@@ -5,11 +5,11 @@ install mechanism.
 To run `zebrad`, follow the instructions to compile `zebrad` for your platform:
 
 1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install).
-  - Using `rustup` installs the stable Rust toolchain, which `zebrad` targets.
+   - Using `rustup` installs the stable Rust toolchain, which `zebrad` targets.
 2. Install Zebra's build dependencies:
-  - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager
-  - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC`
-3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.13 zebrad`
+   - **libclang:** the `libclang`, `libclang-dev`, `llvm`, or `llvm-dev` packages, depending on your package manager
+   - **clang** or another C++ compiler: `g++`, `Xcode`, or `MSVC`
+3. Run `cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-beta.14 zebrad`
 4. Run `zebrad start` (see [Running Zebra](run.md) for more information)
 
 If you're interested in testing out `zebrad` please feel free, but keep in mind
@@ -20,27 +20,30 @@ that there is a lot of key functionality still missing.
 If you're having trouble with:
 
 Dependencies:
+
 - use `cargo install` without `--locked` to build with the latest versions of each dependency
 
 - **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature.
   If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`.
   [Follow the arti instructions](https://gitlab.torproject.org/tpo/core/arti/-/blob/main/CONTRIBUTING.md#setting-up-your-development-environment) to install libsqlite3, or use one of these commands instead:
+
 ```sh
 cargo build
 cargo build -p zebrad --all-features
 ```
 
 Compilers:
+
 - **clang:** install both `libclang` and `clang` - they are usually different packages
 - **libclang:** check out the [clang-sys documentation](https://github.com/KyleMayes/clang-sys#dependencies)
 - **g++ or MSVC++:** try using clang or Xcode instead
 - **rustc:** use rustc 1.58 or later - Zebra does not have a minimum supported Rust version (MSRV) policy yet
-
 ### Dependencies
 
 Zebra primarily depends on pure Rust crates, and some Rust/C++ crates:
+
 - [rocksdb](https://crates.io/crates/rocksdb)
 - [zcash_script](https://crates.io/crates/zcash_script)
diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml
index 47c2b4ec274..84bda2af54d 100644
--- a/tower-batch/Cargo.toml
+++ b/tower-batch/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "tower-batch"
-version = "0.2.28"
+version = "0.2.29"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT"
 edition = "2021"
diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml
index d3c9e491901..c4bf9621ea1 100644
--- a/tower-fallback/Cargo.toml
+++ b/tower-fallback/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "tower-fallback"
-version = "0.2.28"
+version = "0.2.29"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT"
 edition = "2021"
diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml
index 7870d095e48..290dfaaf8fe 100644
--- a/zebra-chain/Cargo.toml
+++ b/zebra-chain/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-chain"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml
index 5800a84cb37..65a10309895 100644
--- a/zebra-consensus/Cargo.toml
+++ b/zebra-consensus/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-consensus"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml
index 5f3776e94c7..df8ac85c828 100644
--- a/zebra-network/Cargo.toml
+++ b/zebra-network/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-network"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs
index 6e119c2470f..a3b1fadb8f9 100644
--- a/zebra-network/src/constants.rs
+++ b/zebra-network/src/constants.rs
@@ -242,7 +242,7 @@ pub const TIMESTAMP_TRUNCATION_SECONDS: u32 = 30 * 60;
 /// [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki
 //
 // TODO: generate this from crate metadata (#2375)
-pub const USER_AGENT: &str = "/Zebra:1.0.0-beta.13/";
+pub const USER_AGENT: &str = "/Zebra:1.0.0-beta.14/";
 
 /// The Zcash network protocol version implemented by this crate, and advertised
 /// during connection setup.
diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml
index 65bed6466ed..7e5a52f4ce4 100644
--- a/zebra-node-services/Cargo.toml
+++ b/zebra-node-services/Cargo.toml
@@ -2,7 +2,7 @@
 name = "zebra-node-services"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 edition = "2021"
 
 repository = "https://github.com/ZcashFoundation/zebra"
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml
index b2203e8197f..303d5b270dc 100644
--- a/zebra-rpc/Cargo.toml
+++ b/zebra-rpc/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-rpc"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml
index 4490448f8b8..5c97afa5062 100644
--- a/zebra-script/Cargo.toml
+++ b/zebra-script/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-script"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml
index 44557e2fd41..9c0ab8a1ba5 100644
--- a/zebra-state/Cargo.toml
+++ b/zebra-state/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-state"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml
index 3d1d9384792..3300e8fc439 100644
--- a/zebra-test/Cargo.toml
+++ b/zebra-test/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "zebra-test"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
 edition = "2021"
diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml
index 44855868245..781b2906628 100644
--- a/zebra-utils/Cargo.toml
+++ b/zebra-utils/Cargo.toml
@@ -2,7 +2,7 @@
 name = "zebra-utils"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 edition = "2021"
 # Prevent accidental publication of this utility crate.
 publish = false
diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml
index 5fc2ccb8fba..96b112a8c94 100644
--- a/zebrad/Cargo.toml
+++ b/zebrad/Cargo.toml
@@ -2,7 +2,7 @@
 name = "zebrad"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
 license = "MIT OR Apache-2.0"
-version = "1.0.0-beta.13"
+version = "1.0.0-beta.14"
 edition = "2021"
 # Zebra is only supported on the latest stable Rust version. Some earlier versions might work.
 # Zebra uses features introduced in Rust 1.58.
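The trailing `zebrad/Cargo.toml` comments restate the toolchain policy from the README hunks earlier in this patch: only the latest stable Rust is supported, and features from Rust 1.58 are required. Before building the release, it is worth confirming the local toolchain is current; a minimal sketch using standard `rustup` commands:

```sh
# Update to the latest stable toolchain, then confirm the compiler version.
rustup update stable
rustc --version
```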