PR #1248, addressing https://github.com/jackdewinter/pymarkdown/issues/1247
Changes from all commits:

- b21bcd9
- 60caebf
- 69f549d
- 8d4286e
- a5c5e58
- d4370e9
- ec34bc6
- a06323e
- 63a0599
- 472be97
- 309a3b8
- 6220698
- 6721ca2
File 1 (the rule plugin):

```diff
@@ -56,6 +56,7 @@ def __init__(self) -> None:
         self.__removed_container_adjustments: Optional[
             List[PendingContainerAdjustment]
         ] = None
+        self.__last_end_container_tokens: List[MarkdownToken] = []

         self.__leading_space_index_tracker = LeadingSpaceIndexTracker()

```
```diff
@@ -513,17 +514,26 @@ def __fix_spacing(
         new_token.adjust_line_number(context, self.__fix_count)
         assert self.__last_token is not None
         if token.is_fenced_code_block and self.__last_token.is_list_end:
+            end_container_index = len(self.__last_end_container_tokens) - 1
+            while (
+                end_container_index >= 0
+                and self.__last_end_container_tokens[end_container_index].is_list_end
+            ):
+                end_container_index -= 1
+            first_token = self.__last_end_container_tokens[end_container_index + 1]
             replacement_tokens: List[MarkdownToken] = [
                 BlankLineMarkdownToken(
                     extracted_whitespace="",
                     position_marker=PositionMarker(new_token.line_number - 1, 0, ""),
                     column_delta=1,
                 ),
-                self.__last_token,
-                new_token,
             ]
+            replacement_tokens.extend(
+                self.__last_end_container_tokens[end_container_index + 1 :]
+            )
+            replacement_tokens.append(new_token)
             self.register_replace_tokens_request(
-                context, self.__last_token, token, replacement_tokens
+                context, first_token, token, replacement_tokens
             )
         elif did_special_list_fix:
             assert self.__last_token and self.__last_token.is_block_quote_end
```

Review comment on the new `end_container_index` loop, issue (code-quality): Extract code out into method.
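One way that extraction could look, as a minimal sketch only (the helper name `__first_trailing_list_end_index` is invented here for illustration and is not part of the PR):

```python
def __first_trailing_list_end_index(self) -> int:
    """Return the index, in self.__last_end_container_tokens, of the first
    token in the trailing run of list-end tokens."""
    index = len(self.__last_end_container_tokens) - 1
    # Walk backwards while the tokens are still list ends.
    while index >= 0 and self.__last_end_container_tokens[index].is_list_end:
        index -= 1
    return index + 1
```

`__fix_spacing` would then compute `first_index = self.__first_trailing_list_end_index()` once and reuse it both for `first_token` and for the `extend` slice.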
```diff
@@ -755,6 +765,11 @@ def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:
         ):
             self.__last_non_end_token = token

+        if token.is_block_quote_end or token.is_list_end:
+            self.__last_end_container_tokens.append(token)
+        else:
+            self.__last_end_container_tokens.clear()
+
         self.__second_last_token = self.__last_token
         self.__last_token = token
         self.__removed_container_stack_token = None
```
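To make the new bookkeeping concrete, here is a toy walkthrough, a sketch only: `FakeToken` is a stand-in for pymarkdown's `MarkdownToken`, exposing just the two flags the code above consults.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class FakeToken:
    """Illustrative stand-in for MarkdownToken."""

    name: str
    is_block_quote_end: bool = False
    is_list_end: bool = False


def track(tokens: List[FakeToken]) -> List[str]:
    """Mirror the next_token bookkeeping: container-end tokens accumulate
    into a trailing run, and any other token resets the run."""
    last_end_container_tokens: List[FakeToken] = []
    for token in tokens:
        if token.is_block_quote_end or token.is_list_end:
            last_end_container_tokens.append(token)
        else:
            last_end_container_tokens.clear()
    return [t.name for t in last_end_container_tokens]


# Two list ends arrive back to back before the fenced block, so both
# are still in the run when the fence is processed.
print(track([
    FakeToken("text"),
    FakeToken("end-list-inner", is_list_end=True),
    FakeToken("end-list-outer", is_list_end=True),
]))  # ['end-list-inner', 'end-list-outer']
```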
File 2 (`TransformContainers`):

```diff
@@ -426,9 +426,55 @@ def __abcd(

     # pylint: enable=too-many-arguments, too-many-boolean-expressions

+    @staticmethod
+    def __abcd_final_list_second_half(
+        removed_tokens: List[MarkdownToken],
+        removed_token_indices: List[int],
+        container_stack: List[MarkdownToken],
+        possible_prefix: Optional[str],
+    ) -> Optional[str]:
+        container_stack_copy: List[MarkdownToken] = container_stack[:]
+        container_stack_copy.extend(removed_tokens[::-1])
+        is_only_list_starts_on_stack = not any(
+            i.is_block_quote_start for i in container_stack_copy
+        )
+
+        prefix = None
+        if is_only_list_starts_on_stack and possible_prefix is not None:
+            prefix = possible_prefix
+            token_index = len(removed_tokens) - 1
+            while token_index >= 0:
+                list_token = cast(ListStartMarkdownToken, removed_tokens[token_index])
+                assert list_token.leading_spaces is None
+                assert removed_token_indices[token_index] == 0
+                token_index -= 1
+                del removed_tokens[0]
+                del removed_token_indices[0]
+
+            list_token = cast(ListStartMarkdownToken, container_stack[-1])
+            current_indent_level = list_token.indent_level if container_stack else 0
+            prefix = " " * current_indent_level
+        else:
+            if removed_tokens[0].is_block_quote_start:
+                removed_block_quote_token = cast(
+                    BlockQuoteMarkdownToken, removed_tokens[0]
+                )
+                assert removed_block_quote_token.bleading_spaces is not None
+                split_bleading_spaces = removed_block_quote_token.bleading_spaces.split(
+                    "\n"
+                )
+                assert removed_token_indices[0] < len(split_bleading_spaces)
+                prefix = "" if possible_prefix is None else possible_prefix
+                prefix = f"{split_bleading_spaces[removed_token_indices[0]]}{prefix}"
+                del removed_tokens[0]
+                del removed_token_indices[0]
+        return prefix
+
     @staticmethod
     def __abcd_final_list(
-        removed_tokens: List[MarkdownToken], removed_token_indices: List[int]
+        removed_tokens: List[MarkdownToken],
+        removed_token_indices: List[int],
+        container_stack: List[MarkdownToken],
     ) -> Optional[str]:
         removed_list_token = cast(ListStartMarkdownToken, removed_tokens[0])
         assert removed_list_token.leading_spaces is not None
@@ -437,26 +483,17 @@ def __abcd_final_list(

         # prefix is None for most cases
         possible_prefix = None
         if (
-            removed_token_indices[0] < len(split_leading_spaces)
-            and split_leading_spaces[removed_token_indices[0]]
+            removed_token_indices[0]
+            < len(split_leading_spaces)
+            # and split_leading_spaces[removed_token_indices[0]]
         ):
             possible_prefix = split_leading_spaces[removed_token_indices[0]]
         del removed_tokens[0]
         del removed_token_indices[0]

-        prefix = None
-        if removed_tokens[0].is_block_quote_start:
-            removed_block_quote_token = cast(BlockQuoteMarkdownToken, removed_tokens[0])
-            assert removed_block_quote_token.bleading_spaces is not None
-            split_bleading_spaces = removed_block_quote_token.bleading_spaces.split(
-                "\n"
-            )
-            assert removed_token_indices[0] < len(split_bleading_spaces)
-            prefix = "" if possible_prefix is None else possible_prefix
-            prefix = f"{split_bleading_spaces[removed_token_indices[0]]}{prefix}"
-            del removed_tokens[0]
-            del removed_token_indices[0]
-        return prefix
+        return TransformContainers.__abcd_final_list_second_half(
+            removed_tokens, removed_token_indices, container_stack, possible_prefix
+        )

     @staticmethod
     def __abcd_final(
@@ -506,7 +543,7 @@ def __abcd_final(
             del removed_token_indices[0]
         else:
             prefix = TransformContainers.__abcd_final_list(
-                removed_tokens, removed_token_indices
+                removed_tokens, removed_token_indices, container_stack
             )
         return prefix
```

Review comment on `__abcd_final_list_second_half`, issue (complexity): Consider consolidating the indent calculation logic into a single method without stack manipulation. The split into `__abcd_final_list` and `__abcd_final_list_second_half` leaves the calculation spread across two methods; a single consolidated method could read:

```python
@staticmethod
def __abcd_final_list(
    removed_tokens: List[MarkdownToken],
    removed_token_indices: List[int],
    container_stack: List[MarkdownToken],
) -> Optional[str]:
    # Handle list token prefix
    removed_list_token = cast(ListStartMarkdownToken, removed_tokens[0])
    assert removed_list_token.leading_spaces is not None
    split_leading_spaces = removed_list_token.leading_spaces.split("\n")
    possible_prefix = None
    if removed_token_indices[0] < len(split_leading_spaces):
        possible_prefix = split_leading_spaces[removed_token_indices[0]]
    del removed_tokens[0]
    del removed_token_indices[0]
    # Calculate indent if only list tokens
    if not any(t.is_block_quote_start for t in container_stack + removed_tokens):
        current_indent_level = (
            container_stack[-1].indent_level if container_stack else 0
        )
        return (
            " " * current_indent_level if possible_prefix is None else possible_prefix
        )
    # Handle block quote case
    if removed_tokens[0].is_block_quote_start:
        bq_token = cast(BlockQuoteMarkdownToken, removed_tokens[0])
        assert bq_token.bleading_spaces is not None
        split_spaces = bq_token.bleading_spaces.split("\n")
        assert removed_token_indices[0] < len(split_spaces)
        prefix = "" if possible_prefix is None else possible_prefix
        prefix = f"{split_spaces[removed_token_indices[0]]}{prefix}"
        del removed_tokens[0]
        del removed_token_indices[0]
        return prefix
    return None
```
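One observation on this consolidation: it drops the `container_stack_copy` that `__abcd_final_list_second_half` builds, since `any()` is order-insensitive, so checking `container_stack + removed_tokens` directly gives the same block-quote test without copying and reversing the stack.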
Review comment on the rule plugin, issue (complexity): Consider refactoring container end token handling to use a recursive traversal approach instead of maintaining state. The current implementation maintains ongoing state via `__last_end_container_tokens` and uses complex index manipulation to process it; this can be simplified while keeping the enhanced multi-container functionality.
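The suggested code for this comment is not preserved above. As an illustrative sketch only (the function name, signature, and list-based token access are assumptions, not the reviewer's actual proposal), a stateless recursive scan could replace the accumulated list by walking backwards from the fenced-code token when it is needed:

```python
from typing import List


def collect_trailing_container_ends(
    tokens: List["MarkdownToken"], fence_index: int
) -> List["MarkdownToken"]:
    """Gather the unbroken run of container-end tokens that immediately
    precede tokens[fence_index], in document order, without keeping any
    per-token bookkeeping state."""

    def walk(index: int, accumulated: List["MarkdownToken"]) -> List["MarkdownToken"]:
        token = tokens[index] if index >= 0 else None
        if token is None or not (token.is_block_quote_end or token.is_list_end):
            return accumulated
        # Prepend so the result stays in document order, then recurse left.
        return walk(index - 1, [token] + accumulated)

    return walk(fence_index - 1, [])
```

The trade-off is rescanning on demand rather than the PR's constant-time append/clear per token; for a single linting pass over a token list, either cost is small.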