Commit 50a1e94
jackdewinter authored Oct 28, 2024
1 parent cee1f3f commit 50a1e94
Showing 7 changed files with 398 additions and 69 deletions.
4 changes: 4 additions & 0 deletions newdocs/src/changelog.md
@@ -23,6 +23,10 @@
- [Issue 1245](https://github.com/jackdewinter/pymarkdown/issues/1245)
  - Handling leading spaces in `__fix_spacing` function now that data
    is present.
+- [Issue 1247](https://github.com/jackdewinter/pymarkdown/issues/1247)
+  - In 3+ drop cases with only lists and no block quotes, indent is
+    not calculated properly on rehydrate. This in turn causes the
+    fixed text to be wrong.

<!--- pyml disable-next-line no-duplicate-heading-->
### Changed
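To make the new changelog entry above more concrete, here is a rough sketch of the kind of document it describes; the sample input and the expected result are assumptions for illustration, not taken from this commit's test cases. The shape is a fenced code block that begins right after three or more nested lists close at once ("3+ drop", lists only, no block quotes), where the MD031 fix must insert a blank line and rehydrate the surrounding indent correctly.

```python
# Hypothetical example of a "3+ drop, lists only" document (illustration only).
SAMPLE_INPUT = "\n".join(
    [
        "+ level 1",
        "  + level 2",
        "    + level 3",
        "      + level 4",
        "        inner text",
        "~~~text",  # fence starts right after four list levels end at once
        "fenced block",
        "~~~",
    ]
)

# What a correct MD031 fix is expected to produce: a blank line separating the
# closed lists from the fence, with the rest of the document left untouched.
EXPECTED_AFTER_FIX = "\n".join(
    [
        "+ level 1",
        "  + level 2",
        "    + level 3",
        "      + level 4",
        "        inner text",
        "",
        "~~~text",
        "fenced block",
        "~~~",
    ]
)

if __name__ == "__main__":
    print(SAMPLE_INPUT)
    print("---- expected after fix ----")
    print(EXPECTED_AFTER_FIX)
```

Before this commit, the rehydrated text for cases like this could come out with the wrong indent, which is what the rule_md_031.py and transform_containers.py changes below address.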
8 changes: 4 additions & 4 deletions publish/coverage.json
@@ -2,12 +2,12 @@
"projectName": "pymarkdown",
"reportSource": "pytest",
"branchLevel": {
"totalMeasured": 5393,
"totalCovered": 5392
"totalMeasured": 5403,
"totalCovered": 5401
},
"lineLevel": {
"totalMeasured": 21094,
"totalCovered": 21093
"totalMeasured": 21123,
"totalCovered": 21122
}
}

4 changes: 2 additions & 2 deletions publish/test-results.json
@@ -1367,7 +1367,7 @@
"totalTests": 653,
"failedTests": 0,
"errorTests": 0,
"skippedTests": 62,
"skippedTests": 61,
"elapsedTimeInMilliseconds": 0
},
{
@@ -1620,7 +1620,7 @@
},
{
"name": "test.test_markdown_extra",
"totalTests": 206,
"totalTests": 210,
"failedTests": 0,
"errorTests": 0,
"skippedTests": 3,
23 changes: 19 additions & 4 deletions pymarkdown/plugins/rule_md_031.py
@@ -56,6 +56,7 @@ def __init__(self) -> None:
self.__removed_container_adjustments: Optional[
List[PendingContainerAdjustment]
] = None
self.__last_end_container_tokens: List[MarkdownToken] = []

self.__leading_space_index_tracker = LeadingSpaceIndexTracker()

@@ -513,17 +514,26 @@ def __fix_spacing(
new_token.adjust_line_number(context, self.__fix_count)
assert self.__last_token is not None
if token.is_fenced_code_block and self.__last_token.is_list_end:
end_container_index = len(self.__last_end_container_tokens) - 1
while (
end_container_index >= 0
and self.__last_end_container_tokens[end_container_index].is_list_end
):
end_container_index -= 1
first_token = self.__last_end_container_tokens[end_container_index + 1]
replacement_tokens: List[MarkdownToken] = [
BlankLineMarkdownToken(
extracted_whitespace="",
position_marker=PositionMarker(new_token.line_number - 1, 0, ""),
column_delta=1,
),
self.__last_token,
new_token,
)
]
replacement_tokens.extend(
self.__last_end_container_tokens[end_container_index + 1 :]
)
replacement_tokens.append(new_token)
self.register_replace_tokens_request(
context, self.__last_token, token, replacement_tokens
context, first_token, token, replacement_tokens
)
elif did_special_list_fix:
assert self.__last_token and self.__last_token.is_block_quote_end
@@ -755,6 +765,11 @@ def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:
):
self.__last_non_end_token = token

if token.is_block_quote_end or token.is_list_end:
self.__last_end_container_tokens.append(token)
else:
self.__last_end_container_tokens.clear()

self.__second_last_token = self.__last_token
self.__last_token = token
self.__removed_container_stack_token = None
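The two additions to rule_md_031.py work together: next_token now records the trailing run of end-container tokens, and __fix_spacing walks back over consecutive list ends so the inserted blank line (plus the preserved end tokens) replaces the whole run rather than only the last list end. Below is a minimal self-contained sketch of that bookkeeping; FakeToken and the function names are simplified stand-ins (hypothetical), not pymarkdown's real MarkdownToken API.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class FakeToken:
    """Simplified stand-in for a markdown end-container token (hypothetical)."""

    name: str
    is_list_end: bool = False
    is_block_quote_end: bool = False


def track_trailing_end_tokens(stream: List[FakeToken]) -> List[FakeToken]:
    # Mirrors the next_token() addition: keep accumulating end-container
    # tokens, and reset as soon as any other token type arrives.
    trailing: List[FakeToken] = []
    for token in stream:
        if token.is_list_end or token.is_block_quote_end:
            trailing.append(token)
        else:
            trailing.clear()
    return trailing


def first_list_end_of_run(trailing: List[FakeToken]) -> FakeToken:
    # Mirrors the __fix_spacing() addition: walk backwards while the tokens
    # are list ends, then return the first token of that trailing list run.
    index = len(trailing) - 1
    while index >= 0 and trailing[index].is_list_end:
        index -= 1
    return trailing[index + 1]


if __name__ == "__main__":
    tokens = [
        FakeToken("paragraph"),
        FakeToken("end-list-depth-3", is_list_end=True),
        FakeToken("end-list-depth-2", is_list_end=True),
        FakeToken("end-list-depth-1", is_list_end=True),
    ]
    run = track_trailing_end_tokens(tokens)
    print(first_list_end_of_run(run).name)  # -> end-list-depth-3
```

In the real rule, the tokens from that first list end up to the fenced code block are re-emitted after the inserted blank line, which is what the new replacement_tokens.extend(...) call in the diff above does.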
71 changes: 54 additions & 17 deletions pymarkdown/transform_markdown/transform_containers.py
@@ -426,9 +426,55 @@ def __abcd(

# pylint: enable=too-many-arguments, too-many-boolean-expressions

@staticmethod
def __abcd_final_list_second_half(
removed_tokens: List[MarkdownToken],
removed_token_indices: List[int],
container_stack: List[MarkdownToken],
possible_prefix: Optional[str],
) -> Optional[str]:
container_stack_copy: List[MarkdownToken] = container_stack[:]
container_stack_copy.extend(removed_tokens[::-1])
is_only_list_starts_on_stack = not any(
i.is_block_quote_start for i in container_stack_copy
)

prefix = None
if is_only_list_starts_on_stack and possible_prefix is not None:
prefix = possible_prefix
token_index = len(removed_tokens) - 1
while token_index >= 0:
list_token = cast(ListStartMarkdownToken, removed_tokens[token_index])
assert list_token.leading_spaces is None
assert removed_token_indices[token_index] == 0
token_index -= 1
del removed_tokens[0]
del removed_token_indices[0]

list_token = cast(ListStartMarkdownToken, container_stack[-1])
current_indent_level = list_token.indent_level if container_stack else 0
prefix = " " * current_indent_level
else:
if removed_tokens[0].is_block_quote_start:
removed_block_quote_token = cast(
BlockQuoteMarkdownToken, removed_tokens[0]
)
assert removed_block_quote_token.bleading_spaces is not None
split_bleading_spaces = removed_block_quote_token.bleading_spaces.split(
"\n"
)
assert removed_token_indices[0] < len(split_bleading_spaces)
prefix = "" if possible_prefix is None else possible_prefix
prefix = f"{split_bleading_spaces[removed_token_indices[0]]}{prefix}"
del removed_tokens[0]
del removed_token_indices[0]
return prefix

@staticmethod
def __abcd_final_list(
removed_tokens: List[MarkdownToken], removed_token_indices: List[int]
removed_tokens: List[MarkdownToken],
removed_token_indices: List[int],
container_stack: List[MarkdownToken],
) -> Optional[str]:
removed_list_token = cast(ListStartMarkdownToken, removed_tokens[0])
assert removed_list_token.leading_spaces is not None
@@ -437,26 +483,17 @@ def __abcd_final_list(
# prefix is None for most cases
possible_prefix = None
if (
removed_token_indices[0] < len(split_leading_spaces)
and split_leading_spaces[removed_token_indices[0]]
removed_token_indices[0]
< len(split_leading_spaces)
# and split_leading_spaces[removed_token_indices[0]]
):
possible_prefix = split_leading_spaces[removed_token_indices[0]]
del removed_tokens[0]
del removed_token_indices[0]

prefix = None
if removed_tokens[0].is_block_quote_start:
removed_block_quote_token = cast(BlockQuoteMarkdownToken, removed_tokens[0])
assert removed_block_quote_token.bleading_spaces is not None
split_bleading_spaces = removed_block_quote_token.bleading_spaces.split(
"\n"
)
assert removed_token_indices[0] < len(split_bleading_spaces)
prefix = "" if possible_prefix is None else possible_prefix
prefix = f"{split_bleading_spaces[removed_token_indices[0]]}{prefix}"
del removed_tokens[0]
del removed_token_indices[0]
return prefix
return TransformContainers.__abcd_final_list_second_half(
removed_tokens, removed_token_indices, container_stack, possible_prefix
)

@staticmethod
def __abcd_final(
@@ -506,7 +543,7 @@ def __abcd_final(
del removed_token_indices[0]
else:
prefix = TransformContainers.__abcd_final_list(
removed_tokens, removed_token_indices
removed_tokens, removed_token_indices, container_stack
)
return prefix

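For the transform_containers.py change, the new __abcd_final_list_second_half helper picks the rehydration prefix differently depending on whether any block quotes are in play. A condensed sketch of that decision follows; the Container class and the function name are simplified stand-ins (hypothetical), and the real method also consumes the removed tokens and their indices as it goes.

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Container:
    """Simplified stand-in for a container start token (hypothetical)."""

    is_block_quote: bool = False
    indent_level: int = 0          # used when the container is a list
    bleading_spaces: str = ""      # used when the container is a block quote


def rehydration_prefix(
    container_stack: List[Container],
    removed: List[Container],
    removed_indices: List[int],
    possible_prefix: Optional[str],
) -> Optional[str]:
    # Lists only (the newly handled case): the indent comes from the innermost
    # list still on the container stack, not from the removed tokens.
    only_lists = not any(c.is_block_quote for c in container_stack + removed)
    if only_lists and possible_prefix is not None:
        return " " * (container_stack[-1].indent_level if container_stack else 0)
    # Block quote at the front of the removed tokens (the pre-existing case):
    # reuse that block quote's recorded leading text for the current line.
    if removed and removed[0].is_block_quote:
        leading = removed[0].bleading_spaces.split("\n")[removed_indices[0]]
        return f"{leading}{possible_prefix or ''}"
    return None


if __name__ == "__main__":
    stack = [Container(indent_level=2)]
    dropped = [Container(indent_level=6), Container(indent_level=4)]
    print(repr(rehydration_prefix(stack, dropped, [0, 0], "")))  # -> '  '
```

Judging from the removed lines in the diff above, the lists-only branch is new; previously only the block-quote case produced a prefix, which matches the changelog's description of wrong indents when only lists are dropped.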
