Skip to content

Commit

Permalink
nested
Browse files — browse the repository at this point in the history
  • Loading branch information
willmcgugan committed Jan 2, 2024
1 parent b8fccd4 commit e5536b3
Show file tree
Hide file tree
Showing 4 changed files with 134 additions and 38 deletions.
5 changes: 3 additions & 2 deletions src/textual/css/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ class SelectorType(Enum):
TYPE = 2
CLASS = 3
ID = 4
NESTED = 5


class CombinatorType(Enum):
Expand Down Expand Up @@ -138,6 +139,7 @@ def css(self) -> str:
def __rich_repr__(self) -> rich.repr.Result:
    """Repr shows the rule set's selectors (rendered as CSS) and specificity."""
    selectors = RuleSet._selector_to_css(self.selectors)
    yield selectors
    # NOTE(review): dropped a second `yield self.selectors` that exposed the
    # raw selector list alongside its CSS string form — redundant repr noise,
    # likely a debugging leftover.
    yield None, self.specificity

@classmethod
Expand Down Expand Up @@ -175,8 +177,7 @@ def _selector_to_css(cls, selectors: list[Selector]) -> str:
elif selector.combinator == CombinatorType.CHILD:
tokens.append(" > ")
tokens.append(selector.css)
for pseudo_class in selector.pseudo_classes:
tokens.append(f":{pseudo_class}")

return "".join(tokens).strip()

@property
Expand Down
113 changes: 92 additions & 21 deletions src/textual/css/parse.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import dataclasses
from functools import lru_cache
from typing import Iterable, Iterator, NoReturn

Expand Down Expand Up @@ -29,14 +30,15 @@
"selector_start_id": (SelectorType.ID, (1, 0, 0)),
"selector_universal": (SelectorType.UNIVERSAL, (0, 0, 0)),
"selector_start_universal": (SelectorType.UNIVERSAL, (0, 0, 0)),
"nested": (SelectorType.NESTED, (0, 0, 0)),
}


@lru_cache(maxsize=1024)
def parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:
if not css_selectors.strip():
return ()

print(css_selectors)
tokens = iter(tokenize(css_selectors, ("", "")))

get_selector = SELECTOR_MAP.get
Expand All @@ -46,10 +48,13 @@ def parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:

while True:
try:
token = next(tokens)
token = next(tokens, None)
except EOFError:
break
if token is None:
break
token_name = token.name

if token_name == "pseudo_class":
selectors[-1]._add_pseudo_class(token.value.lstrip(":"))
elif token_name == "whitespace":
Expand Down Expand Up @@ -103,7 +108,7 @@ def parse_rule_set(
if combinator is None or combinator == CombinatorType.SAME:
combinator = CombinatorType.DESCENDENT
elif token.name == "new_selector":
rule_selectors.append(selectors[:])
rule_selectors.append(selectors)
selectors.clear()
combinator = None
elif token.name == "declaration_set_start":
Expand All @@ -126,31 +131,91 @@ def parse_rule_set(

token = next(tokens)

if selectors:
if scope and selectors[0].name != scope:
scope_selector, scope_specificity = get_selector(
scope, (SelectorType.TYPE, (0, 0, 0))
)
selectors.insert(
0,
Selector(
name=scope,
combinator=CombinatorType.DESCENDENT,
type=scope_selector,
specificity=scope_specificity,
),
)
rule_selectors.append(selectors[:])
if selectors and scope and selectors[0].name != scope:
scope_selector, scope_specificity = get_selector(
scope, (SelectorType.TYPE, (0, 0, 0))
)
selectors.insert(
0,
Selector(
name=scope,
combinator=CombinatorType.DESCENDENT,
type=scope_selector,
specificity=scope_specificity,
),
)
rule_selectors.append(selectors)

declaration = Declaration(token, "")

errors: list[tuple[Token, str | HelpText]] = []

while True:
token = next(tokens)

token_name = token.name
if token_name in ("whitespace", "declaration_end"):
continue
if token_name in {
"selector_start_id",
"selector_start_class",
"selector_start_universal",
"selector_start",
"nested",
}:
recursive_parse: list[RuleSet] = list(
parse_rule_set(
"",
tokens,
token,
is_default_rules=is_default_rules,
tie_breaker=tie_breaker,
)
)

def combine_selectors(
    parent_chain: list[Selector], nested_chain: list[Selector]
) -> list[Selector]:
    """Splice a nested rule's selectors onto its parent's selectors.

    If the nested chain begins with the NESTED ("&") selector, its pseudo
    classes are folded into the last selector of the parent chain and the
    remainder of the nested chain follows it.  Otherwise the nested chain
    is simply appended (descendant combination).
    """
    leads_with_amp = bool(nested_chain) and nested_chain[0].type == SelectorType.NESTED
    if not leads_with_amp:
        return parent_chain + nested_chain

    anchor = parent_chain[-1]
    amp = nested_chain[0]
    # De-duplicate pseudo classes from both sides (order is not significant).
    pseudo_classes = list(set(anchor.pseudo_classes + amp.pseudo_classes))
    merged = dataclasses.replace(anchor, pseudo_classes=pseudo_classes)
    return [*parent_chain[:-1], merged, *nested_chain[1:]]

for rule_selector in rule_selectors:
for rule_set in recursive_parse:
nested_rule_set = RuleSet(
[
SelectorSet(
combine_selectors(
rule_selector, recursive_selectors.selectors
),
recursive_selectors.specificity,
)
for recursive_selectors in rule_set.selector_set
],
rule_set.styles,
rule_set.errors,
rule_set.is_default_rules,
rule_set.tie_breaker,
rule_set.selector_names,
rule_set.pseudo_classes,
)
nested_rule_set._post_parse()
yield nested_rule_set
continue
if token_name == "declaration_name":
try:
styles_builder.add_declaration(declaration)
Expand Down Expand Up @@ -271,7 +336,14 @@ def substitute_references(
"""
variables: dict[str, list[Token]] = css_variables.copy() if css_variables else {}

# Lazily consume the incoming token stream.
# NOTE(review): removed a debug wrapper generator that printed the repr of
# every token to stdout (the original assignment had been commented out in
# its favour) — debugging instrumentation must not ship.
iter_tokens = iter(tokens)

while True:
token = next(iter_tokens, None)
Expand Down Expand Up @@ -357,7 +429,6 @@ def parse(
is_default_rules: True if the rules we're extracting are
default (i.e. in Widget.DEFAULT_CSS) rules. False if they're from user defined CSS.
"""

reference_tokens = tokenize_values(variables) if variables is not None else {}
if variable_tokens:
reference_tokens.update(variable_tokens)
Expand Down
22 changes: 20 additions & 2 deletions src/textual/css/tokenize.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
# in the CSS file. At this level we might expect to see selectors, comments,
# variable definitions etc.
expect_root_scope = Expect(
"selector or end of file",
whitespace=r"\s+",
comment_start=COMMENT_START,
comment_line=COMMENT_LINE,
Expand All @@ -55,11 +56,13 @@
selector_start_universal=r"\*",
selector_start=IDENTIFIER,
variable_name=rf"{VARIABLE_REF}:",
declaration_set_end=r"\}",
).expect_eof(True)

# After a variable declaration e.g. "$warning-text: TOKENS;"
# for tokenizing variable value ------^~~~~~~^
expect_variable_name_continue = Expect(
"variable value",
variable_value_end=r"\n|;",
whitespace=r"\s+",
comment_start=COMMENT_START,
Expand All @@ -68,12 +71,14 @@
).expect_eof(True)

expect_comment_end = Expect(
"comment end",
comment_end=re.escape("*/"),
)

# After we come across a selector in CSS e.g. ".my-class", we may
# find other selectors, pseudo-classes... e.g. ".my-class :hover"
expect_selector_continue = Expect(
"selectors or {",
whitespace=r"\s+",
comment_start=COMMENT_START,
comment_line=COMMENT_LINE,
Expand All @@ -85,19 +90,29 @@
combinator_child=">",
new_selector=r",",
declaration_set_start=r"\{",
)
declaration_set_end=r"\}",
nested=r"\&",
).expect_eof(True)

# A rule declaration e.g. "text: red;"
# ^---^
expect_declaration = Expect(
"rule declaration",
nested=r"\&",
whitespace=r"\s+",
comment_start=COMMENT_START,
comment_line=COMMENT_LINE,
declaration_name=r"[a-zA-Z_\-]+\:",
declaration_set_end=r"\}",
#
selector_start_id=r"\#" + IDENTIFIER,
selector_start_class=r"\." + IDENTIFIER,
selector_start_universal=r"\*",
selector_start=IDENTIFIER,
)

expect_declaration_solo = Expect(
"rule declaration",
whitespace=r"\s+",
comment_start=COMMENT_START,
comment_line=COMMENT_LINE,
Expand All @@ -108,6 +123,7 @@
# The value(s)/content from a rule declaration e.g. "text: red;"
# ^---^
expect_declaration_content = Expect(
"rule value or end of declaration",
declaration_end=r";",
whitespace=r"\s+",
comment_start=COMMENT_START,
Expand All @@ -119,6 +135,7 @@
)

expect_declaration_content_solo = Expect(
"rule value or end of declaration",
declaration_end=r";",
whitespace=r"\s+",
comment_start=COMMENT_START,
Expand Down Expand Up @@ -156,7 +173,8 @@ class TokenizerState:
"declaration_set_start": expect_declaration,
"declaration_name": expect_declaration_content,
"declaration_end": expect_declaration,
"declaration_set_end": expect_root_scope,
"declaration_set_end": expect_selector_continue,
"nested": expect_selector_continue,
}

def __call__(self, code: str, read_from: CSSLocation) -> Iterable[Token]:
Expand Down
32 changes: 19 additions & 13 deletions src/textual/css/tokenizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,10 @@ class EOFError(TokenError):
pass


@rich.repr.auto
class Expect:
def __init__(self, **tokens: str) -> None:
def __init__(self, description: str, **tokens: str) -> None:
self.description = f"Expected {description}"
self.names = list(tokens.keys())
self.regexes = list(tokens.values())
self._regex = re.compile(
Expand All @@ -134,7 +136,7 @@ class ReferencedBy(NamedTuple):
code: str


@rich.repr.auto
@rich.repr.auto(angular=True)
class Token(NamedTuple):
name: str
value: str
Expand All @@ -144,6 +146,10 @@ class Token(NamedTuple):
"""Token starting location, 0-indexed."""
referenced_by: ReferencedBy | None = None

def __rich_repr__(self) -> rich.repr.Result:
    """Angular repr displays only the token's name and value."""
    yield from (self.name, self.value)

@property
def start(self) -> tuple[int, int]:
"""Start line and column (1-indexed)."""
Expand Down Expand Up @@ -175,16 +181,16 @@ def with_reference(self, by: ReferencedBy | None) -> "Token":
def __str__(self) -> str:
    """The string form of a token is its ``value`` field."""
    return self.value

def __rich_repr__(self) -> rich.repr.Result:
yield "name", self.name
yield "value", self.value
yield (
"read_from",
self.read_from[0] if not self.read_from[1] else self.read_from,
)
yield "code", self.code if len(self.code) < 40 else self.code[:40] + "..."
yield "location", self.location
yield "referenced_by", self.referenced_by, None
# def __rich_repr__(self) -> rich.repr.Result:
# yield "name", self.name
# yield "value", self.value
# yield (
# "read_from",
# self.read_from[0] if not self.read_from[1] else self.read_from,
# )
# yield "code", self.code if len(self.code) < 40 else self.code[:40] + "..."
# yield "location", self.location
# yield "referenced_by", self.referenced_by, None


class Tokenizer:
Expand Down Expand Up @@ -224,7 +230,7 @@ def get_token(self, expect: Expect) -> Token:
self.read_from,
self.code,
(line_no + 1, col_no + 1),
message,
f"{expect.description}; found {line[col_no:].rstrip()!r})." or message,
)
iter_groups = iter(match.groups())

Expand Down

0 comments on commit e5536b3

Please sign in to comment.