{-# LANGUAGE CPP #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE FlexibleContexts #-}
module Stackage.Database.Cron
( stackageServerCron
, newHoogleLocker
, singleRun
, StackageCronOptions(..)
, defHaddockBucketName
, defHaddockBucketUrl
) where
import Conduit
import Control.DeepSeq
import Control.SingleRun
import Control.Lens ((?~))
import qualified Data.ByteString.Char8 as BS8
import qualified Data.Conduit.Binary as CB
import Data.Conduit.Tar (FileInfo(..), FileType(..), untar)
import Data.Conduit.Zlib (WindowBits(WindowBits), compress, ungzip)
import qualified Data.IntMap.Strict as IntMap
import Data.Monoid (Any(..))
import Data.Streaming.Network (bindPortTCP)
import Data.Yaml (decodeFileEither)
import Database.Persist hiding (exists)
import Database.Persist.Postgresql hiding (exists)
import qualified Hoogle
import Amazonka hiding (Request, length, error)
import Amazonka.Data.Text (toText)
import Amazonka.S3
import Amazonka.S3.ListObjectsV2
import Amazonka.S3.Lens
import Amazonka.Lens
import Network.HTTP.Client
import Network.HTTP.Client.Conduit (bodyReaderSource)
import Network.HTTP.Simple (getResponseBody, httpJSONEither)
import Network.HTTP.Types (status200, status404)
import Pantry (CabalFileInfo(..), DidUpdateOccur(..),
HpackExecutable(HpackBundled), PackageIdentifierRevision(..),
defaultCasaMaxPerRequest, defaultCasaRepoPrefix,
defaultHackageSecurityConfig, defaultSnapshotLocation)
import Pantry.Internal.Stackage (HackageTarballResult(..), PantryConfig(..),
Storage(..), forceUpdateHackageIndex,
getHackageTarball, packageTreeKey)
import Path (parseAbsDir, toFilePath)
import RIO
import RIO.Directory
import RIO.File
import RIO.FilePath
import RIO.List as L
import qualified RIO.Map as Map
import RIO.Process (mkDefaultProcessContext)
import qualified RIO.Set as Set
import qualified RIO.Text as T
import RIO.Time
import Settings
import Stackage.Database.Github
import Stackage.Database.PackageInfo
import Stackage.Database.Query
import Stackage.Database.Schema
import Stackage.Database.Types
import System.Environment (getEnvironment)
import UnliftIO.Concurrent (getNumCapabilities)
import Web.PathPieces (fromPathPiece, toPathPiece)
import qualified Control.Retry as Retry
hoogleKey :: SnapName -> Text
hoogleKey name = T.concat
[ "hoogle/"
, toPathPiece name
, "/"
, VERSION_hoogle
, ".hoo"
]
hoogleUrl :: SnapName -> Text -> Text
hoogleUrl n haddockBucketUrl = T.concat
[ haddockBucketUrl
, "/"
, hoogleKey n
]
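-- Illustrative shapes of the key and URL (the hoogle version component comes
-- from the VERSION_hoogle CPP macro at build time; the values here are
-- examples, not guaranteed outputs):
--
-- > hoogleKey (SNLts 18 28)           == "hoogle/lts-18.28/5.0.18.3.hoo"
-- > hoogleUrl (SNLts 18 28) bucketUrl == bucketUrl <> "/hoogle/lts-18.28/5.0.18.3.hoo"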
hackageDeprecatedUrl :: Request
hackageDeprecatedUrl = "https://hackage.haskell.org/packages/deprecated.json"
withStorage :: (Storage -> IO a) -> IO a
withStorage inner = do
as <- getAppSettings
withStackageDatabase False (appDatabase as) (\db -> inner (Storage (runDatabase db) id))
getStackageSnapshotsDir :: RIO StackageCron FilePath
getStackageSnapshotsDir = do
cron <- ask
cloneOrUpdate (scStackageRoot cron) (scSnapshotsRepo cron)
withResponseUnliftIO :: MonadUnliftIO m => Request -> Manager -> (Response BodyReader -> m b) -> m b
withResponseUnliftIO req man f = withRunInIO $ \ runInIO -> withResponse req man (runInIO . f)
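-- A minimal usage sketch (req and man are hypothetical here): stream a
-- response body from within a MonadUnliftIO stack such as RIO:
--
-- > withResponseUnliftIO req man $ \res ->
-- >     runConduitRes $ bodyReaderSource (responseBody res) .| sinkNull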
-- | Under the SingleRun wrapper that ensures only one thing at a time is
-- writing the file in question, ensure that a Hoogle database exists on the
-- filesystem for the given SnapName; it only goes as far as downloading it
-- from the haddock bucket. See 'createHoogleDB' for the function that puts it
-- there in the first place.
newHoogleLocker ::
(HasLogFunc env, MonadIO m) => env -> Manager -> Text -> m (SingleRun SnapName (Maybe FilePath))
newHoogleLocker env man bucketUrl = mkSingleRun hoogleLocker
where
hoogleLocker :: MonadIO n => SnapName -> n (Maybe FilePath)
hoogleLocker name =
runRIO env $ do
let fp = T.unpack $ hoogleKey name
exists <- doesFileExist fp
if exists
then return $ Just fp
else do
req' <- parseRequest $ T.unpack $ hoogleUrl name bucketUrl
let req = req' {decompress = const False}
withResponseUnliftIO req man $ \res ->
case responseStatus res of
status
| status == status200 -> do
createDirectoryIfMissing True $ takeDirectory fp
withBinaryFileDurableAtomic fp WriteMode $ \h ->
runConduitRes $
bodyReaderSource (responseBody res) .| ungzip .|
sinkHandle h
return $ Just fp
| status == status404 -> do
logWarn $ "NotFound: " <> display (hoogleUrl name bucketUrl)
return Nothing
| otherwise -> do
body <- liftIO $ brConsume $ responseBody res
logWarn $ "Unexpected status: " <> displayShow status
mapM_ (logWarn . displayBytesUtf8) body
return Nothing
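-- A usage sketch (mirroring 'buildAndUploadHoogleDB' below); concurrent
-- callers asking for the same SnapName share a single download via 'singleRun':
--
-- > locker <- newHoogleLocker (env ^. logFuncL) (awsEnv ^. env_manager) bucketUrl
-- > mfp <- singleRun locker snapName -- Just path once the db is on disk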
getHackageDeprecations ::
(HasLogFunc env, MonadReader env m, MonadIO m) => m [Deprecation]
getHackageDeprecations = do
let policy = Retry.exponentialBackoff 50 <> Retry.limitRetries 5
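-- (exponential backoff with a 50 microsecond base delay, at most 5 retries)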
jsonResponseDeprecated <-
liftIO $ Retry.recoverAll policy $ const $ httpJSONEither hackageDeprecatedUrl
case getResponseBody jsonResponseDeprecated of
Left err -> do
logError $
"There was an error parsing deprecated.json file: " <>
fromString (displayException err)
return []
Right deprecated -> return deprecated
stackageServerCron :: StackageCronOptions -> IO ()
stackageServerCron StackageCronOptions {..} = do
void $
-- Hacky approach instead of PID files
catchIO (bindPortTCP 17834 "127.0.0.1") $
const $ throwString "Stackage Cron loader process already running, exiting."
connectionCount <- getNumCapabilities
withStorage $ \storage -> do
lo <- logOptionsHandle stdout True
stackageRootDir <- getAppUserDataDirectory "stackage"
pantryRootDir <- parseAbsDir (stackageRootDir </> "pantry")
createDirectoryIfMissing True (toFilePath pantryRootDir)
updateRef <- newMVar True
cabalImmutable <- newIORef Map.empty
cabalMutable <- newIORef Map.empty
gpdCache <- newIORef IntMap.empty
defaultProcessContext <- mkDefaultProcessContext
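-- Allow overriding the S3 endpoint via the AWS_S3_ENDPOINT environment
-- variable (useful for S3-compatible stores); True and 443 select HTTPS on
-- the standard port.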
aws <- do
aws' <- newEnv discover
endpoint <- lookup "AWS_S3_ENDPOINT" <$> getEnvironment
pure $ case endpoint of
Nothing -> aws'
Just ep -> configureService (setEndpoint True (BS8.pack ep) 443 Amazonka.S3.defaultService) aws'
withLogFunc (setLogMinLevel scoLogLevel lo) $ \logFunc -> do
let pantryConfig =
PantryConfig
{ pcHackageSecurity = defaultHackageSecurityConfig
, pcHpackExecutable = HpackBundled
, pcRootDir = pantryRootDir
, pcStorage = storage
, pcUpdateRef = updateRef
, pcParsedCabalFilesRawImmutable = cabalImmutable
, pcParsedCabalFilesMutable = cabalMutable
, pcConnectionCount = connectionCount
, pcCasaRepoPrefix = defaultCasaRepoPrefix
, pcCasaMaxPerRequest = defaultCasaMaxPerRequest
, pcSnapshotLocation = defaultSnapshotLocation
}
currentHoogleVersionId <- runRIO logFunc $ do
runStackageMigrations' pantryConfig
getCurrentHoogleVersionIdWithPantryConfig pantryConfig
let stackage =
StackageCron
{ scPantryConfig = pantryConfig
, scStackageRoot = stackageRootDir
, scProcessContext = defaultProcessContext
, scLogFunc = logFunc
, scForceFullUpdate = scoForceUpdate
, scCachedGPD = gpdCache
, scEnvAWS = aws
, scDownloadBucketName = scoDownloadBucketName
, scDownloadBucketUrl = scoDownloadBucketUrl
, scUploadBucketName = scoUploadBucketName
, scSnapshotsRepo = scoSnapshotsRepo
, scReportProgress = scoReportProgress
, scCacheCabalFiles = scoCacheCabalFiles
, scHoogleVersionId = currentHoogleVersionId
}
runRIO stackage (runStackageUpdate scoDoNotUpload)
runStackageUpdate :: Bool -> RIO StackageCron ()
runStackageUpdate doNotUpload = do
forceFullUpdate <- scForceFullUpdate <$> ask
logInfo $ "Starting stackage-cron update" <> bool "" " with --force-update" forceFullUpdate
runStackageMigrations
didUpdate <- forceUpdateHackageIndex (Just "stackage-server cron job")
case didUpdate of
UpdateOccurred -> logInfo "Updated hackage index"
NoUpdateOccurred -> logInfo "No new packages in hackage index"
logInfo "Getting deprecated info now"
getHackageDeprecations >>= setDeprecations
corePackageGetters <- makeCorePackageGetters
runResourceT $
join $
runConduit $ sourceSnapshots .| foldMC (createOrUpdateSnapshot corePackageGetters) (pure ())
unless doNotUpload uploadSnapshotsJSON
buildAndUploadHoogleDB doNotUpload
logInfo "Finished building and uploading Hoogle DBs"
-- | This will look at 'global-hints.yaml' and will create core package getters that are reused
-- later for adding those packages to individual snapshots.
makeCorePackageGetters ::
RIO StackageCron (Map CompilerP [CorePackageGetter])
makeCorePackageGetters = do
rootDir <- scStackageRoot <$> ask
contentDir <- getStackageContentDir rootDir
coreCabalFiles <- getCoreCabalFiles rootDir
liftIO (decodeFileEither (contentDir </> "stack" </> "global-hints.yaml")) >>= \case
Right (hints :: Map CompilerP (Map PackageNameP VersionP)) ->
Map.traverseWithKey
(\compiler ->
fmap Map.elems .
Map.traverseMaybeWithKey (makeCorePackageGetter compiler coreCabalFiles))
hints
Left exc -> do
logError $
"Error parsing 'global-hints.yaml' file: " <> fromString (displayException exc)
return mempty
getCoreCabalFiles ::
FilePath
-> RIO StackageCron (Map PackageIdentifierP (GenericPackageDescription, CabalFileIds))
getCoreCabalFiles rootDir = do
coreCabalFilesDir <- getCoreCabalFilesDir rootDir
cabalFileNames <- getDirectoryContents coreCabalFilesDir
cabalFiles <-
forM (filter (isExtensionOf ".cabal") cabalFileNames) $ \cabalFileName ->
let pidTxt = T.pack (dropExtension (takeFileName cabalFileName))
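-- e.g. a file named "base-4.14.3.0.cabal" yields "base-4.14.3.0" (illustrative)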
in case fromPathPiece pidTxt of
Nothing -> do
logError $ "Invalid package identifier: " <> fromString cabalFileName
pure Nothing
Just pid -> do
cabalBlob <- readFileBinary (coreCabalFilesDir </> cabalFileName)
mCabalInfo <- run $ addCabalFile pid cabalBlob
pure ((,) pid <$> mCabalInfo)
pure $ Map.fromList $ catMaybes cabalFiles
-- | Core package info rarely changes between snapshots, therefore it would be wasteful to
-- load, parse and update all packages from global-hints for each snapshot. Instead we produce
-- a memoized version that will do it once initially and then return information about a
-- package on subsequent invocations.
makeCorePackageGetter ::
CompilerP
-> Map PackageIdentifierP (GenericPackageDescription, CabalFileIds)
-> PackageNameP
-> VersionP
-> RIO StackageCron (Maybe CorePackageGetter)
makeCorePackageGetter _compiler fallbackCabalFileMap pname ver =
run (getHackageCabalByRev0 pid) >>= \case
Nothing -> do
logWarn $
"Core package from global-hints: '" <> display pid <> "' was not found in pantry."
forM (Map.lookup pid fallbackCabalFileMap) $ \(gpd, cabalFileIds) -> do
logInfo $
"Falling back on '" <> display pid <>
".cabal' file from the commercialhaskell/core-cabal-files repo"
pure $ pure (Left cabalFileIds, Nothing, pid, gpd)
Just (hackageCabalId, blobId, _) -> do
pkgInfoRef <- newIORef Nothing -- use for caching of pkgInfo
let getCabalFileIdsTree gpd =
\case
Just tree -> pure $ Right tree
Nothing -> Left <$> getCabalFileIds blobId gpd
let getMemoPackageInfo =
readIORef pkgInfoRef >>= \case
Just pkgInfo -> return pkgInfo
Nothing -> do
whenM (scReportProgress <$> ask) $
logSticky $ "Loading core package: " <> display pid
htr <- getHackageTarball pir Nothing
case htrFreshPackageInfo htr of
Just (gpd, treeId) -> do
eTree <-
run $ do
mTree <- getEntity treeId
getCabalFileIdsTree gpd mTree
let pkgInfo = (eTree, Just hackageCabalId, pid, gpd)
gpd `deepseq` writeIORef pkgInfoRef $ Just pkgInfo
pure pkgInfo
Nothing -> do
(gpd, eCabalTree) <-
run $ do
cabalBlob <- loadBlobById blobId
let gpd = parseCabalBlob cabalBlob
mTree <- getTreeForKey (packageTreeKey (htrPackage htr))
(,) gpd <$> getCabalFileIdsTree gpd mTree
let pkgInfo = (eCabalTree, Just hackageCabalId, pid, gpd)
gpd `deepseq` writeIORef pkgInfoRef $ Just pkgInfo
pure pkgInfo
pure $ Just getMemoPackageInfo
where
pid = PackageIdentifierP pname ver
pir =
PackageIdentifierRevision (unPackageNameP pname) (unVersionP ver) (CFIRevision (Revision 0))
-- TODO: for now it is only from hackage, PantryPackage needs an update to use other origins
-- | A pantry package is being added to a particular snapshot. Extra information, such as the
-- compiler and flags, is passed on in order to properly figure out dependencies and modules.
addPantryPackage ::
SnapshotId -> CompilerP -> Bool -> Map FlagNameP Bool -> PantryPackage -> RIO StackageCron Bool
addPantryPackage sid compiler isHidden flags (PantryPackage pc treeKey) = do
env <- ask
let gpdCachedRef = scCachedGPD env
cache = scCacheCabalFiles env
let blobKeyToInt = fromIntegral . unSqlBackendKey . unBlobKey
let updateCacheGPD blobId gpd =
gpd `deepseq`
atomicModifyIORef' gpdCachedRef (\cacheMap -> (IntMap.insert blobId gpd cacheMap, gpd))
let getCachedGPD treeCabal =
\case
Just gpd | cache -> updateCacheGPD (blobKeyToInt treeCabal) gpd
Just gpd -> pure gpd
Nothing | cache -> do
cacheMap <- readIORef gpdCachedRef
case IntMap.lookup (blobKeyToInt treeCabal) cacheMap of
Just gpd -> pure gpd
Nothing ->
loadBlobById treeCabal >>=
updateCacheGPD (blobKeyToInt treeCabal) . parseCabalBlob
Nothing -> parseCabalBlob <$> loadBlobById treeCabal
let storeHackageSnapshotPackage hcid mtid mgpd =
getTreeForKey treeKey >>= \case
Just (Entity treeId _)
| Just tid <- mtid
, tid /= treeId -> do
lift $ logError $ "Pantry Tree Key mismatch for: " <> display pc
pure False
Just tree@(Entity _ Tree {treeCabal})
| Just treeCabal' <- treeCabal -> do
gpd <- getCachedGPD treeCabal' mgpd
let mhcid = Just hcid
eTree = Right tree
addSnapshotPackage sid compiler Hackage eTree mhcid isHidden flags pid gpd
pure True
_ -> do
lift $ logError $ "Pantry is missing the source tree for " <> display pc
pure False
mHackageCabalInfo <- run $ getHackageCabalByKey pid (pcCabalKey pc)
case mHackageCabalInfo of
Nothing -> do
logError $ "Could not find the cabal file for: " <> display pc
pure False
Just (hcid, Nothing) -> do
mHPI <-
htrFreshPackageInfo <$>
getHackageTarball (toPackageIdentifierRevision pc) (Just treeKey)
run $
case mHPI of
Just (gpd, treeId) -> storeHackageSnapshotPackage hcid (Just treeId) (Just gpd)
Nothing -> storeHackageSnapshotPackage hcid Nothing Nothing
Just (hcid, mtid) -> run $ storeHackageSnapshotPackage hcid mtid Nothing
where
pid = PackageIdentifierP (pcPackageName pc) (pcVersion pc)
-- | Download a list of available .html files from the S3 bucket for a particular resolver and record
-- in the database which modules have documentation available for them.
checkForDocs :: SnapshotId -> SnapName -> ResourceT (RIO StackageCron) ()
checkForDocs snapshotId snapName = do
bucketName <- lift (scDownloadBucketName <$> ask)
env <- asks scEnvAWS
mods <-
runConduit $
paginate env (req bucketName) .| concatMapC (fromMaybe [] . (^. listObjectsV2Response_contents)) .|
mapC (\obj -> toText (obj ^. object_key)) .|
concatMapC (T.stripSuffix ".html" >=> T.stripPrefix prefix >=> pathToPackageModule) .|
sinkList
-- It is faster to download all modules in this snapshot than to process them with a conduit all
-- the way to the database.
sidsCacheRef <- newIORef Map.empty
-- The cache is for SnapshotPackageId: there will be many modules per package, so there is no
-- need to look into the database for each one of them.
n <- max 1 . (`div` 2) <$> getNumCapabilities
unexpectedPackages <- lift $ pooledMapConcurrentlyN n (markModules sidsCacheRef) mods
forM_ (Set.fromList $ catMaybes unexpectedPackages) $ \pid ->
lift $
logWarn $
"Documentation found for package '" <> display pid <>
"', which does not exist in this snapshot: " <>
display snapName
where
prefix = textDisplay snapName <> "/"
req bucketName = newListObjectsV2 (BucketName bucketName) & listObjectsV2_prefix ?~ prefix
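-- Keys under the prefix are expected to look like (illustrative):
--
-- > "lts-18.28/acme-missiles-0.3/Acme-Missiles.html"
--
-- which 'pathToPackageModule' splits into a package identifier and module name.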
-- | This function records all package modules that have documentation available; the ones
-- that are not found in the snapshot are reported back as an error. Besides being run
-- concurrently, this function also optimizes the SnapshotPackageId lookup, since that can
-- be shared amongst many modules of one package.
markModules sidsCacheRef (pid, modName) = do
sidsCache <- readIORef sidsCacheRef
let mSnapshotPackageId = Map.lookup pid sidsCache
mFound <- run $ markModuleHasDocs snapshotId pid mSnapshotPackageId modName
case mFound of
Nothing -> pure $ Just pid -- This package doesn't exist in the snapshot!
Just snapshotPackageId
| Nothing <- mSnapshotPackageId -> do
atomicModifyIORef'
sidsCacheRef
(\cacheMap -> (Map.insert pid snapshotPackageId cacheMap, ()))
pure Nothing
_ -> pure Nothing
data SnapshotFileInfo = SnapshotFileInfo
{ sfiSnapName :: !SnapName
, sfiUpdatedOn :: !UTCTime
, sfiSnapshotFileGetter :: !(RIO StackageCron (Maybe SnapshotFile))
}
-- | Use the 'github.com/commercialhaskell/stackage-snapshots' repository to source all of the packages
-- one snapshot at a time.
sourceSnapshots :: ConduitT a SnapshotFileInfo (ResourceT (RIO StackageCron)) ()
sourceSnapshots = do
snapshotsDir <- lift $ lift getStackageSnapshotsDir
sourceDirectoryDeep False (snapshotsDir </> "lts") .| concatMapMC (getLtsParser snapshotsDir)
sourceDirectoryDeep False (snapshotsDir </> "nightly") .|
concatMapMC (getNightlyParser snapshotsDir)
where
makeSnapshotFileInfo gitDir fp mFileNameDate snapName = do
let parseSnapshot updatedOn = do
esnap <- liftIO $ decodeFileEither fp
case esnap of
Right snap ->
let publishDate =
sfPublishDate snap <|> mFileNameDate <|> Just (utctDay updatedOn)
in return $ Just snap {sfPublishDate = publishDate}
Left exc -> do
logError $
"Error parsing snapshot file: " <> fromString fp <> "\n" <>
fromString (displayException exc)
return Nothing
mUpdatedOn <- lastGitFileUpdate gitDir fp
forM mUpdatedOn $ \updatedOn -> do
env <- lift ask
return $
SnapshotFileInfo
{ sfiSnapName = snapName
, sfiUpdatedOn = updatedOn
, sfiSnapshotFileGetter = runRIO env (parseSnapshot updatedOn)
}
getLtsParser gitDir fp =
case mapM (BS8.readInt . BS8.pack) $ take 2 $ reverse (splitPath fp) of
Just [(minor, ".yaml"), (major, "/")] ->
makeSnapshotFileInfo gitDir fp Nothing $ SNLts major minor
_ -> do
logError
("Couldn't parse the filepath into an LTS version: " <> display (T.pack fp))
return Nothing
getNightlyParser gitDir fp =
case mapM (BS8.readInt . BS8.pack) $ take 3 $ reverse (splitPath fp) of
Just [(day, ".yaml"), (month, "/"), (year, "/")]
| Just date <- fromGregorianValid (fromIntegral year) month day ->
makeSnapshotFileInfo gitDir fp (Just date) $ SNNightly date
_ -> do
logError
("Couldn't parse the filepath into a Nightly date: " <> display (T.pack fp))
return Nothing
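-- Illustrative path shapes accepted by the parsers above:
--
-- > "lts/18/28.yaml"          parses as  SNLts 18 28
-- > "nightly/2021/12/25.yaml" parses as  SNNightly 2021-12-25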
-- | Creates a new `Snapshot` if it is not yet present in the database and decides if an update
-- is necessary when it already exists.
decideOnSnapshotUpdate :: SnapshotFileInfo -> RIO StackageCron (Maybe (SnapshotId, SnapshotFile))
decideOnSnapshotUpdate SnapshotFileInfo {sfiSnapName, sfiUpdatedOn, sfiSnapshotFileGetter} = do
forceUpdate <- scForceFullUpdate <$> ask
let mkLogMsg rest = "Snapshot with name: " <> display sfiSnapName <> " " <> rest
mKeySnapFile <-
run (getBy (UniqueSnapshot sfiSnapName)) >>= \case
Just (Entity _key snap)
| snapshotUpdatedOn snap == Just sfiUpdatedOn && not forceUpdate -> do
logInfo $ mkLogMsg "already exists and is up to date."
return Nothing
Just entity@(Entity _key snap)
| Nothing <- snapshotUpdatedOn snap -> do
logWarn $ mkLogMsg "did not finish updating last time."
fmap (Just entity, ) <$> sfiSnapshotFileGetter
Just entity -> do
unless forceUpdate $ logWarn $ mkLogMsg "was updated, applying new patch."
fmap (Just entity, ) <$> sfiSnapshotFileGetter
Nothing -> fmap (Nothing, ) <$> sfiSnapshotFileGetter
-- Add a new snapshot to the database when necessary
case mKeySnapFile of
Just (Just (Entity snapKey snap), sf@SnapshotFile {sfCompiler, sfPublishDate})
| Just publishDate <- sfPublishDate -> do
let updatedSnap =
Snapshot sfiSnapName sfCompiler publishDate (snapshotUpdatedOn snap)
run $ replace snapKey updatedSnap
pure $ Just (snapKey, sf)
Just (Nothing, sf@SnapshotFile {sfCompiler, sfPublishDate})
| Just publishDate <- sfPublishDate ->
fmap (, sf) <$>
run (insertUnique (Snapshot sfiSnapName sfCompiler publishDate Nothing))
_ -> return Nothing
type CorePackageGetter
= RIO StackageCron ( Either CabalFileIds (Entity Tree)
, Maybe HackageCabalId
, PackageIdentifierP
, GenericPackageDescription)
-- | This is an optimized version of snapshot loading which can load a snapshot and the documentation
-- info for the previous snapshot at the same time. It will concurrently execute the loading of the
-- current snapshot as well as the action that was passed as an argument. At the end it will return
-- an action that should be invoked in order to mark modules that have documentation available,
-- which in turn can be passed as an argument to the next snapshot loader.
createOrUpdateSnapshot ::
Map CompilerP [CorePackageGetter]
-> ResourceT (RIO StackageCron) ()
-> SnapshotFileInfo
-> ResourceT (RIO StackageCron) (ResourceT (RIO StackageCron) ())
createOrUpdateSnapshot corePackageInfoGetters prevAction sfi@SnapshotFileInfo { sfiSnapName
, sfiUpdatedOn
} = do
finishedDocs <- newIORef False
runConcurrently
(Concurrently (prevAction >> writeIORef finishedDocs True) *>
Concurrently (lift (loadCurrentSnapshot finishedDocs)))
where
loadCurrentSnapshot finishedDocs = do
loadDocs <-
decideOnSnapshotUpdate sfi >>= \case
Nothing -> return $ pure ()
Just (snapshotId, snapshotFile) ->
updateSnapshot
corePackageInfoGetters
snapshotId
sfiSnapName
sfiUpdatedOn
snapshotFile
report <- scReportProgress <$> ask
when report $
unlessM (readIORef finishedDocs) $
logSticky "Still loading the docs for previous snapshot ..."
pure loadDocs
-- | Updates all packages in the snapshot; any that are missing will be created. Returns an action
-- that will check for available documentation for modules that are known to exist and mark them as
-- documented when the haddock is present on AWS S3. Only after documentation has been checked will
-- this snapshot be marked as completely updated. This is required in case something goes wrong and
-- the process is interrupted.
updateSnapshot ::
Map CompilerP [CorePackageGetter]
-> SnapshotId
-> SnapName
-> UTCTime
-> SnapshotFile
-> RIO StackageCron (ResourceT (RIO StackageCron) ())
updateSnapshot corePackageGetters snapshotId snapName updatedOn SnapshotFile {..} = do
insertSnapshotName snapshotId snapName
loadedPackageCountRef <- newIORef (0 :: Int)
let totalPackages = length sfPackages
addPantryPackageWithReport pp = do
let PantryCabal {pcPackageName} = ppPantryCabal pp
isHidden = fromMaybe False (Map.lookup pcPackageName sfHidden)
flags = fromMaybe Map.empty $ Map.lookup pcPackageName sfFlags
curSucc <- addPantryPackage snapshotId sfCompiler isHidden flags pp
atomicModifyIORef' loadedPackageCountRef (\c -> (c + 1, ()))
pure curSucc
-- Leave some cores and db connections for the doc loader
n <- max 1 . (`div` 2) <$> getNumCapabilities
before <- getCurrentTime
report <- scReportProgress <$> ask
pantryUpdatesSucceeded <-
runConcurrently
(Concurrently
(when report (runProgressReporter loadedPackageCountRef totalPackages snapName)) *>
Concurrently (pooledMapConcurrentlyN n addPantryPackageWithReport sfPackages))
after <- getCurrentTime
let timeTotal = round (diffUTCTime after before)
(mins, secs) = timeTotal `quotRem` (60 :: Int)
packagePerSecond = fromIntegral ((totalPackages * 100) `div` timeTotal) / 100 :: Float
allPantryUpdatesSucceeded = and pantryUpdatesSucceeded
logInfo $
mconcat
[ "Loading snapshot '"
, display snapName
, "' was done (in "
, displayShow mins
, "min "
, displayShow secs
, "sec). With average "
, displayShow packagePerSecond
, " packages/sec. There are still docs."
]
case Map.lookup sfCompiler corePackageGetters of
Nothing -> logError $ "Hints are not found for the compiler: " <> display sfCompiler
Just _
| not allPantryUpdatesSucceeded ->
logWarn $
mconcat
[ "There was an issue loading a snapshot '"
, display snapName
, "', deferring addition of packages "
, "from global-hints until next time."
]
Just compilerCorePackages ->
forM_ compilerCorePackages $ \getCorePackageInfo -> do
(eTree, mhcid, pid, gpd) <- getCorePackageInfo
run $ addSnapshotPackage snapshotId sfCompiler Core eTree mhcid False mempty pid gpd
return $ do
checkForDocsSucceeded <-
tryAny (checkForDocs snapshotId snapName) >>= \case
Left exc -> do
logError $ "Received exception while getting the docs: " <> displayShow exc
return False
Right () -> return True
if allPantryUpdatesSucceeded &&
checkForDocsSucceeded && Map.member sfCompiler corePackageGetters
then do
lift $ snapshotMarkUpdated snapshotId updatedOn
logInfo $ "Created or updated snapshot '" <> display snapName <> "' successfully"
else logError $ "There were errors while adding snapshot '" <> display snapName <> "'"
-- | Report how many packages have been loaded so far.
runProgressReporter :: IORef Int -> Int -> SnapName -> RIO StackageCron ()
runProgressReporter loadedPackageCountRef totalPackages snapName = do
let reportProgress = do
loadedPackageCount <- readIORef loadedPackageCountRef
when (loadedPackageCount < totalPackages) $ do
logSticky $
mconcat
[ "Loading snapshot '"
, display snapName
, "' ("
, displayShow loadedPackageCount
, "/"
, displayShow totalPackages
, ")"
]
threadDelay 1000000
reportProgress
reportProgress
-- | Uploads a JSON file to S3 with the latest snapshot per major LTS version and the latest nightly.
uploadSnapshotsJSON :: RIO StackageCron ()
uploadSnapshotsJSON = do
snapshots <- snapshotsJSON
uploadBucket <- scUploadBucketName <$> ask
let key = ObjectKey "snapshots.json"
uploadFromRIO key $
set putObject_acl (Just ObjectCannedACL_Public_read) $
set putObject_contentType (Just "application/json") $
newPutObject (BucketName uploadBucket) key (toBody snapshots)
-- | Writes a gzipped version of the hoogle db into a temporary file on the file system and then uploads
-- it to S3. The temporary file is removed upon completion.
uploadHoogleDB :: FilePath -> ObjectKey -> RIO StackageCron ()
uploadHoogleDB fp key =
withTempFile (takeDirectory fp) (takeFileName fp <.> "gz") $ \fpgz h -> do
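-- (WindowBits 31 selects the gzip container format; 9 is maximum compression)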
runConduitRes $ sourceFile fp .| compress 9 (WindowBits 31) .| CB.sinkHandle h
hClose h
body <- toBody <$> readFileBinary fpgz
uploadBucket <- scUploadBucketName <$> ask
uploadFromRIO key $
set putObject_acl (Just ObjectCannedACL_Public_read) $ newPutObject (BucketName uploadBucket) key body
uploadFromRIO :: (AWSRequest a, Typeable a, Typeable (AWSResponse a)) => ObjectKey -> a -> RIO StackageCron ()
uploadFromRIO key po = do
logInfo $ "Uploading " <> displayShow key <> " to S3 bucket."
env <- asks scEnvAWS
eres <- runResourceT $ trying _Error $ send env po
case eres of
Left e ->
logError $ "Couldn't upload " <> displayShow key <> " to S3 becuase " <> displayShow e
Right _ -> logInfo $ "Successfully uploaded " <> displayShow key <> " to S3"
buildAndUploadHoogleDB :: Bool -> RIO StackageCron ()
buildAndUploadHoogleDB doNotUpload = do
snapshots <- lastLtsNightlyWithoutHoogleDb 5 5
-- currentHoogleVersionId <- scHoogleVersionId <$> ask
env <- ask
awsEnv <- asks scEnvAWS
bucketUrl <- asks scDownloadBucketUrl
-- locker is an action that returns the path to a hoogle db, if one exists
-- in the haddock bucket already.
locker <- newHoogleLocker (env ^. logFuncL) (awsEnv ^. env_manager) bucketUrl
let insertH = checkInsertSnapshotHoogleDb True
checkH = checkInsertSnapshotHoogleDb False
for_ snapshots $ \(snapshotId, snapName) ->
-- Even though we just got a list of snapshots that don't have hoogle
-- databases, we check again. For some reason. I don't see how this can
-- actually be useful. Both lastLtsNightlyWithoutHoogleDb and
-- checkInsertSnapshotHoogleDb just check against SnapshotHoogleDb.
-- Perhaps the check can be removed.
unlessM (checkH snapshotId) $ do
logInfo $ "Starting Hoogle database download: " <> display (hoogleKey snapName)
mfp <- singleRun locker snapName
case mfp of
Just _ -> do
logInfo $ "Current hoogle database exists for: " <> display snapName
void $ insertH snapshotId
Nothing -> do
logInfo $ "Current hoogle database does not yet exist in the bucket for: " <> display snapName
mfp' <- createHoogleDB snapshotId snapName
forM_ mfp' $ \fp -> do
let key = hoogleKey snapName
dest = T.unpack key
createDirectoryIfMissing True $ takeDirectory dest
renamePath fp dest
unless doNotUpload $ do
uploadHoogleDB dest (ObjectKey key)
void $ insertH snapshotId
-- | Create a hoogle db from haddocks for the given snapshot, and upload it to
-- the haddock bucket.
createHoogleDB :: SnapshotId -> SnapName -> RIO StackageCron (Maybe FilePath)
createHoogleDB snapshotId snapName =
handleAny logException $ do
logInfo $ "Creating Hoogle DB for " <> display snapName
downloadBucketUrl <- scDownloadBucketUrl <$> ask
let root = "hoogle-gen"
bindir = root </> "bindir"
outname = root </> "output.hoo"
tarKey = toPathPiece snapName <> "/hoogle/orig.tar"
tarUrl = downloadBucketUrl <> "/" <> tarKey
tarFP = root </> T.unpack tarKey
-- When the tarball is downloaded it is saved with durability and atomicity, so if it
-- is present it is not in a corrupted state.
unlessM (doesFileExist tarFP) $ do
req <- parseRequest $ T.unpack tarUrl
env <- asks scEnvAWS
let man = env ^. env_manager
withResponseUnliftIO req {decompress = const True} man $ \res -> do
throwErrorStatusCodes req res
createDirectoryIfMissing True $ takeDirectory tarFP
withBinaryFileDurableAtomic tarFP WriteMode $ \tarHandle ->
runConduitRes $ bodyReaderSource (responseBody res) .| sinkHandle tarHandle
void $ tryIO $ removeDirectoryRecursive bindir
void $ tryIO $ removeFile outname
createDirectoryIfMissing True bindir
withSystemTempDirectory ("hoogle-" ++ T.unpack (textDisplay snapName)) $ \tmpdir -> do
Any hasRestored <-
runConduitRes $
sourceFile tarFP .|
untar (restoreHoogleTxtFileWithCabal tmpdir snapshotId snapName) .|
foldMapC Any
unless hasRestored $ error "No Hoogle .txt files found"
let args = ["generate", "--database=" ++ outname, "--local=" ++ tmpdir]
logInfo $
mconcat
[ "Merging databases... ("
, foldMap fromString $ L.intersperse " " ("hoogle" : args)
, ")"
]
liftIO $ Hoogle.hoogle args
logInfo "Merge done"
return $ Just outname
where
logException exc =
logError ("Problem creating hoogle db for " <> display snapName <> ": " <> displayShow exc) $>
Nothing
-- | Grabs a hoogle txt file from the tarball and a matching cabal file from pantry. Writes
-- them into the supplied temp directory and yields the result of the operation as a boolean for
-- every tar entry.
restoreHoogleTxtFileWithCabal ::
FilePath
-> SnapshotId
-> SnapName
-> FileInfo
-> ConduitM ByteString Bool (ResourceT (RIO StackageCron)) ()
restoreHoogleTxtFileWithCabal tmpdir snapshotId snapName fileInfo =
case fileType fileInfo of
FTNormal -> do
let txtFileName = T.decodeUtf8With T.lenientDecode $ filePath fileInfo
txtPackageName = T.takeWhile (/= '.') txtFileName
mpkg = fromPathPiece txtPackageName
maybe (pure Nothing) (lift . lift . getSnapshotPackageCabalBlob snapshotId) mpkg >>= \case
Nothing -> do
logWarn $
"Unexpected hoogle filename: " <> display txtFileName <>
" in orig.tar for snapshot: " <>
display snapName
yield False
Just cabal -> do
writeFileBinary (tmpdir </> T.unpack txtPackageName <.> "cabal") cabal
sinkFile (tmpdir </> T.unpack txtFileName)
yield True
_ -> yield False
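-- A sketch of the expected input (illustrative values) after 'checkForDocs'
-- has stripped the snapshot prefix and the ".html" suffix; the dashed module
-- name is parsed back into its dotted form:
--
-- > pathToPackageModule "acme-missiles-0.3/Acme-Missiles"
-- >   == Just ("acme-missiles-0.3", "Acme.Missiles")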
pathToPackageModule :: Text -> Maybe (PackageIdentifierP, ModuleNameP)
pathToPackageModule txt =
case T.split (== '/') txt of
[pkgIdentifier, moduleNameDashes] -> do
modName :: ModuleNameP <- fromPathPiece moduleNameDashes
pkgId :: PackageIdentifierP <- fromPathPiece pkgIdentifier
Just (pkgId, modName)
_ -> Nothing