Skip to content

Commit

Permalink
Merge pull request NixOS#184 from NixOS/hlint
Browse files Browse the repository at this point in the history
Enforce hlint
  • Loading branch information
piegamesde authored Apr 15, 2024
2 parents 35b0186 + 54b4fbf commit 3cb264c
Show file tree
Hide file tree
Showing 12 changed files with 90 additions and 90 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ jobs:
- name: reuse lint
run: nix-build -A packages.reuse && result/bin/reuse lint

- name: hlint
run: nix-build -A checks.hlint

- name: build nixfmt
run: nix-build
if: success() || failure()
Expand Down
3 changes: 2 additions & 1 deletion default.nix
Original file line number Diff line number Diff line change
Expand Up @@ -55,11 +55,12 @@ build
haskellPackages.haskell-language-server
shellcheck
npins
hlint
];
};

checks = {
hlint = pkgs.build.haskell.hlint ./.;
hlint = pkgs.build.haskell.hlint src;
stylish-haskell = pkgs.build.haskell.stylish-haskell ./.;
};
}
2 changes: 1 addition & 1 deletion main/Main.hs
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ checkTarget format Target{tDoRead, tPath} = do
| otherwise -> Left $ tPath ++ ": not formatted"

stdioTarget :: Target
stdioTarget = Target TextIO.getContents "<stdin>" (const $ TextIO.putStr)
stdioTarget = Target TextIO.getContents "<stdin>" (const TextIO.putStr)

fileTarget :: FilePath -> Target
fileTarget path = Target (readFileUtf8 path) path atomicWriteFile
Expand Down
3 changes: 1 addition & 2 deletions main/System/IO/Utf8.hs
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,7 @@ withUtf8StdHandles :: IO a -> IO a
withUtf8StdHandles action =
withConfiguredHandle stdin $
withConfiguredHandle stdout $
withConfiguredHandle stderr $
action
withConfiguredHandle stderr action
where
withConfiguredHandle :: IO.Handle -> IO a -> IO a
withConfiguredHandle h = bracket (hSetBestUtf8Enc h) ($ h) . const
Expand Down
9 changes: 4 additions & 5 deletions src/Nixfmt.hs
Original file line number Diff line number Diff line change
Expand Up @@ -39,17 +39,16 @@ formatVerify :: Width -> FilePath -> Text -> Either String Text
formatVerify width path unformatted = do
unformattedParsed@(Whole unformattedParsed' _) <- parse unformatted
let formattedOnce = layout width unformattedParsed
formattedOnceParsed <- flip first (parse formattedOnce) $
(\x -> pleaseReport "Fails to parse after formatting.\n" <> x <> "\n\nAfter Formatting:\n" <> unpack formattedOnce)
formattedOnceParsed <- first (\x -> pleaseReport "Fails to parse after formatting.\n" <> x <> "\n\nAfter Formatting:\n" <> unpack formattedOnce) (parse formattedOnce)
let formattedTwice = layout width formattedOnceParsed
if formattedOnceParsed /= unformattedParsed
then Left $
let
minimized = minimize unformattedParsed' (\e -> parse (layout width e) == Right (Whole e []))
in
pleaseReport "Parses differently after formatting."
<> "\n\nBefore formatting:\n" <> (show minimized)
<> "\n\nAfter formatting:\n" <> (show $ fromRight (error "TODO") $ parse (layout width minimized))
<> "\n\nBefore formatting:\n" <> show minimized
<> "\n\nAfter formatting:\n" <> show (fromRight (error "TODO") $ parse (layout width minimized))
else if formattedOnce /= formattedTwice
then Left $
let
Expand All @@ -67,6 +66,6 @@ formatVerify width path unformatted = do

minimize :: Expression -> (Expression -> Bool) -> Expression
minimize expr test =
case concatMap (\e -> case test e of { False -> [minimize e test]; True -> [] }) $ walkSubprograms expr of
case concatMap (\e -> ([minimize e test | not (test e)])) $ walkSubprograms expr of
result:_ -> result
[] -> expr
8 changes: 4 additions & 4 deletions src/Nixfmt/Lexer.hs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
{-# LANGUAGE BlockArguments, FlexibleContexts, LambdaCase, OverloadedStrings, TupleSections #-}
{-# LANGUAGE BlockArguments, FlexibleContexts, LambdaCase, OverloadedStrings #-}

module Nixfmt.Lexer (lexeme, pushTrivia, takeTrivia, whole) where

Expand Down Expand Up @@ -47,7 +47,7 @@ blockComment = try $ preLexeme $ do
let pos' = unPos pos - 1
_ <- chunk "/*"
-- Try to parse /** before /*, but don't parse /**/ (i.e. the empty comment)
isDoc <- try (const True <$> char '*' <* notFollowedBy (char '/')) <|> pure False
isDoc <- try ((True <$ char '*') <* notFollowedBy (char '/')) <|> pure False

chars <- manyTill anySingle $ chunk "*/"
return $ PTBlockComment isDoc $ dropWhile Text.null $ fixIndent pos' $ removeStars pos' $ splitLines $ pack chars
Expand Down Expand Up @@ -83,7 +83,7 @@ blockComment = try $ preLexeme $ do
stripIndentation n t = fromMaybe (stripStart t) $ stripPrefix (Text.replicate n " ") t

commonIndentationLength :: Int -> [Text] -> Int
commonIndentationLength def = foldr min def . map (Text.length . Text.takeWhile (==' '))
commonIndentationLength = foldr (min . Text.length . Text.takeWhile (==' '))

-- This should be called with zero or one elements, as per `span isTrailing`
convertTrailing :: [ParseTrivium] -> Maybe TrailingComment
Expand Down Expand Up @@ -119,7 +119,7 @@ convertTrivia pts nextCol =
-- This happens especially often after `{` or `[` tokens, where the comment of the first item
-- starts on the same line as the opening token.
([PTLineComment _ pos], (PTNewlines 1):(PTLineComment _ pos'):_) | pos == pos' -> (Nothing, convertLeading pts)
([PTLineComment _ pos], [(PTNewlines 1)]) | pos == nextCol -> (Nothing, convertLeading pts)
([PTLineComment _ pos], [PTNewlines 1]) | pos == nextCol -> (Nothing, convertLeading pts)
_ -> (convertTrailing trailing, convertLeading leading)

trivia :: Parser [ParseTrivium]
Expand Down
6 changes: 3 additions & 3 deletions src/Nixfmt/Parser.hs
Original file line number Diff line number Diff line change
Expand Up @@ -227,9 +227,9 @@ parens = Parenthesized <$>

simpleSelector :: Parser StringPart -> Parser SimpleSelector
simpleSelector parseInterpolation =
((IDSelector <$> identifier) <|>
(IDSelector <$> identifier) <|>
(InterpolSelector <$> lexeme parseInterpolation) <|>
(StringSelector <$> lexeme simpleString))
(StringSelector <$> lexeme simpleString)

selector :: Maybe (Parser Leaf) -> Parser Selector
selector parseDot = Selector <$>
Expand All @@ -246,7 +246,7 @@ selectorPath' = many $ try $ selector $ Just $ symbol TDot
-- Everything but selection
simpleTerm :: Parser Term
simpleTerm =
(SimpleString <$> (lexeme $ simpleString <|> uri))
(SimpleString <$> lexeme (simpleString <|> uri))
<|> (IndentedString <$> lexeme indentedString)
<|> (Path <$> path)
<|> (Token <$> (envPath <|> float <|> integer <|> identifier))
Expand Down
4 changes: 2 additions & 2 deletions src/Nixfmt/Parser/Float.hs
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ import "scientific" Data.Scientific (toRealFloat, scientific)
data SP = SP !Integer {-# UNPACK #-} !Int
floatParse :: (MonadParsec e s m, Token s ~ Char, RealFloat a) => m a
floatParse = do
notFollowedBy $ (char '0') >> digitChar
notFollowedBy $ (char' 'e')
notFollowedBy $ char '0' >> digitChar
notFollowedBy (char' 'e')
c' <- (decimal <?> "decimal") <|> return 0
toRealFloat
<$> (( do
Expand Down
30 changes: 15 additions & 15 deletions src/Nixfmt/Predoc.hs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
{-# LANGUAGE FlexibleInstances, OverloadedStrings, LambdaCase #-}
{-# LANGUAGE FlexibleInstances, OverloadedStrings #-}

module Nixfmt.Predoc
( text
Expand Down Expand Up @@ -151,7 +151,7 @@ trailing t = [Text 0 0 Trailing t]
-- Must not contain non-hard whitespace (e.g. line, softline' etc.) at the start or the end.
-- Use group' for that instead if you are sure of what you are doing.
group :: HasCallStack => Pretty a => a -> Doc
group x = pure . (Group RegularG) $
group x = pure . Group RegularG $
if p /= [] && (isSoftSpacing (head p) || isSoftSpacing (last p)) then
error $ "group should not start or end with whitespace, use `group'` if you are sure; " <> show p
else
Expand All @@ -166,7 +166,7 @@ group x = pure . (Group RegularG) $
--
-- Also allows to create priority groups (see Node Group documentation)
group' :: Pretty a => GroupAnn -> a -> Doc
group' ann = pure . (Group ann) . pretty
group' ann = pure . Group ann . pretty

-- | @nest doc@ declares @doc@ to have a higher nesting depth
-- than before. Not all nestings actually result in indentation changes,
Expand Down Expand Up @@ -276,10 +276,10 @@ spanEnd p = fmap reverse . span p . reverse
unexpandSpacing' :: Maybe Int -> Doc -> Maybe Doc
unexpandSpacing' (Just n) _ | n < 0 = Nothing
unexpandSpacing' _ [] = Just []
unexpandSpacing' n (txt@(Text _ _ _ t):xs) = (txt :) <$> unexpandSpacing' (n <&> (subtract $ textWidth t)) xs
unexpandSpacing' n (Spacing Hardspace:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> (subtract 1)) xs
unexpandSpacing' n (Spacing Space:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> (subtract 1)) xs
unexpandSpacing' n (Spacing Softspace:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> (subtract 1)) xs
unexpandSpacing' n (txt@(Text _ _ _ t):xs) = (txt :) <$> unexpandSpacing' (n <&> subtract (textWidth t)) xs
unexpandSpacing' n (Spacing Hardspace:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> subtract 1) xs
unexpandSpacing' n (Spacing Space:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> subtract 1) xs
unexpandSpacing' n (Spacing Softspace:xs) = (Spacing Hardspace :) <$> unexpandSpacing' (n <&> subtract 1) xs
unexpandSpacing' n (Spacing Break:xs) = unexpandSpacing' n xs
unexpandSpacing' n (Spacing Softbreak:xs) = unexpandSpacing' n xs
unexpandSpacing' _ (Spacing _:_) = Nothing
Expand Down Expand Up @@ -316,7 +316,7 @@ fixup (a@(Spacing _) : Group ann xs : ys) =
-- For the leading side, also move out comments out of groups, they are kinda the same thing
-- (We could move out trailing comments too but it would make no difference)
(pre, rest) = span (\x -> isHardSpacing x || isComment x) $ fixup xs
(post, body) = (second $ simplifyGroup ann) $ spanEnd isHardSpacing rest
(post, body) = second (simplifyGroup ann) $ spanEnd isHardSpacing rest
in if null body then
-- Dissolve empty group
fixup $ (a : pre) ++ post ++ ys
Expand All @@ -326,7 +326,7 @@ fixup (a@(Spacing _) : Group ann xs : ys) =
fixup (Group ann xs : ys) =
let
(pre, rest) = span (\x -> isHardSpacing x || isComment x) $ fixup xs
(post, body) = (second $ simplifyGroup ann) $ spanEnd isHardSpacing rest
(post, body) = second (simplifyGroup ann) $ spanEnd isHardSpacing rest
in if null body then
fixup $ pre ++ post ++ ys
else
Expand Down Expand Up @@ -380,7 +380,7 @@ priorityGroups = explode . mergeSegments . segments
| prio = [([], x, [])]
| otherwise = []
explode ((prio, x):xs)
| prio = ([], x, concatMap (snd) xs) : (map (\(a, b, c) -> (x<>a, b, c)) $ explode xs)
| prio = ([], x, concatMap snd xs) : map (\(a, b, c) -> (x<>a, b, c)) (explode xs)
| otherwise = map (\(a, b, c) -> (x<>a, b, c)) (explode xs)

-- | To support i18n, this function needs to be patched.
Expand Down Expand Up @@ -434,7 +434,7 @@ firstLineFits targetWidth maxWidth docs = go maxWidth docs
where go c _ | c < 0 = False
go c [] = maxWidth - c <= targetWidth
go c (Text _ _ RegularT t : xs) = go (c - textWidth t) xs
go c (Text _ _ _ _ : xs) = go c xs
go c (Text {} : xs) = go c xs
-- This case is impossible in the input thanks to fixup, but may happen
-- due to our recursion on groups below
go c (Spacing a : Spacing b : xs) = go c $ Spacing (mergeSpacings a b) : xs
Expand Down Expand Up @@ -534,7 +534,7 @@ layoutGreedy tw doc = Text.concat $ evalState (go [Group RegularG doc] []) (0, s
-- [ # comment
-- 1
-- ]
Text _ _ TrailingComment t | cc == 2 && (fst $ nextIndent xs) > lineNL -> putText' [" ", t]
Text _ _ TrailingComment t | cc == 2 && fst (nextIndent xs) > lineNL -> putText' [" ", t]
where lineNL = snd $ NonEmpty.head indents
Text nl off _ t -> putText nl off t

Expand Down Expand Up @@ -572,7 +572,7 @@ layoutGreedy tw doc = Text.concat $ evalState (go [Group RegularG doc] []) (0, s
-- Ignore transparent groups as their priority children have already been handled up in the parent (and failed)
<|> (if ann /= Transparent then
-- Each priority group will be handled individually, and the priority groups are tried in reverse order
asum $ map (flip goPriorityGroup xs) $ reverse $ priorityGroups ys
asum $ map (`goPriorityGroup` xs) $ reverse $ priorityGroups ys
else
empty
)
Expand All @@ -593,7 +593,7 @@ layoutGreedy tw doc = Text.concat $ evalState (go [Group RegularG doc] []) (0, s
-- Try to render post onto one line
postRendered <- goGroup post rest
-- If none of these failed, put together and return
return $ (preRendered ++ prioRendered ++ postRendered)
return (preRendered ++ prioRendered ++ postRendered)

-- Try to fit the group onto a single line, while accounting for the fact that the first
-- bits of rest must fit as well (until the first possibility for a line break within rest).
Expand All @@ -610,7 +610,7 @@ layoutGreedy tw doc = Text.concat $ evalState (go [Group RegularG doc] []) (0, s
-- therefore drop any leading whitespace within the group to avoid duplicate newlines
grp' = case head grp of
Spacing _ -> tail grp
Group ann ((Spacing _) : inner) -> (Group ann inner) : tail grp
Group ann ((Spacing _) : inner) -> Group ann inner : tail grp
_ -> grp
(nl, off) = nextIndent grp'

Expand Down
Loading

0 comments on commit 3cb264c

Please sign in to comment.