Skip to content

Commit

Permalink
Merge branch 'master' into parameterizing_rules_3
Browse files Browse the repository at this point in the history
  • Loading branch information
ydah committed Nov 7, 2023
2 parents 4e3fea2 + 09f0c59 commit e9353bf
Show file tree
Hide file tree
Showing 9 changed files with 69 additions and 41 deletions.
1 change: 1 addition & 0 deletions Steepfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ target :lib do

check "lib/lrama/bitmap.rb"
check "lib/lrama/digraph.rb"
check "lib/lrama/grammar/counter.rb"
check "lib/lrama/grammar/percent_code.rb"
# TODO: Include this file once Lrama::Grammar::Symbol type is defined
# check "lib/lrama/grammar/reference.rb"
Expand Down
14 changes: 5 additions & 9 deletions lib/lrama/grammar.rb
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
require "lrama/grammar/auxiliary"
require "lrama/grammar/code"
require "lrama/grammar/counter"
require "lrama/grammar/error_token"
require "lrama/grammar/percent_code"
require "lrama/grammar/precedence"
Expand All @@ -23,7 +24,9 @@ class Grammar
:rules, :rule_builders,
:sym_to_rules

def initialize
def initialize(rule_counter)
@rule_counter = rule_counter

# Code defined by "%code"
@percent_codes = []
@printers = []
Expand Down Expand Up @@ -371,17 +374,11 @@ def normalize_rules
accept = find_symbol_by_s_value!("$accept")
eof = find_symbol_by_number!(0)
lineno = @rule_builders.first ? @rule_builders.first.line : 0
@rules << Rule.new(id: @rules.count, lhs: accept, rhs: [@rule_builders.first.lhs, eof], token_code: nil, lineno: lineno)

extracted_action_number = 1 # @n as nterm
@rules << Rule.new(id: @rule_counter.increment, lhs: accept, rhs: [@rule_builders.first.lhs, eof], token_code: nil, lineno: lineno)

@rule_builders.each do |builder|
builder.extracted_action_number = extracted_action_number
extracted_action_number += builder.midrule_action_rules.count

# Extract actions in the middle of RHS into new rules.
builder.midrule_action_rules.each do |rule|
rule.id = @rules.count
@rules << rule
end

Expand All @@ -391,7 +388,6 @@ def normalize_rules

builder.build_rules.each do |rule|
add_nterm(id: rule.lhs)
rule.id = @rules.count
@rules << rule
end

Expand Down
15 changes: 15 additions & 0 deletions lib/lrama/grammar/counter.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
module Lrama
  class Grammar
    # Monotonic counter used to hand out sequential integer ids
    # (e.g. rule numbers and midrule-action numbers).
    class Counter
      # number: the first value that #increment will return.
      def initialize(number)
        @number = number
      end

      # Post-increment: returns the current value and then advances
      # the internal number by one.
      def increment
        @number.tap { |current| @number = current + 1 }
      end
    end
  end
end
46 changes: 23 additions & 23 deletions lib/lrama/grammar/rule_builder.rb
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@ module Lrama
class Grammar
class RuleBuilder
attr_accessor :lhs, :line
attr_accessor :extracted_action_number
attr_reader :rhs, :separators, :user_code, :precedence_sym

def initialize
def initialize(rule_counter, midrule_action_counter)
@rule_counter = rule_counter
@midrule_action_counter = midrule_action_counter

@lhs = nil
@rhs = []
@separators = []
Expand Down Expand Up @@ -57,10 +59,9 @@ def midrule_action_rules
token.is_a?(Lrama::Lexer::Token::UserCode)
end.each_with_index.map do |code, i|
prefix = code.referred ? "@" : "$@"
new_token = Lrama::Lexer::Token::Ident.new(s_value: prefix + (extracted_action_number + i).to_s)
new_token = Lrama::Lexer::Token::Ident.new(s_value: prefix + @midrule_action_counter.increment.to_s)
@code_to_new_token[code] = new_token
# id is set later
Rule.new(id: nil, lhs: new_token, rhs: [], token_code: code, lineno: code.line)
Rule.new(id: @rule_counter.increment, lhs: new_token, rhs: [], token_code: code, lineno: code.line)
end
end

Expand All @@ -77,8 +78,7 @@ def build_rules
if tokens.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) }
expand_parameterizing_rules
else
# id is set later
[Rule.new(id: nil, lhs: lhs, rhs: tokens, token_code: user_code, precedence_sym: precedence_sym, lineno: line)]
[Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: tokens, token_code: user_code, precedence_sym: precedence_sym, lineno: line)]
end
end

Expand All @@ -91,30 +91,30 @@ def expand_parameterizing_rules

if rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.option? }
option_token = Lrama::Lexer::Token::Ident.new(s_value: "option_#{rhs[0].s_value}")
rules << Rule.new(id: nil, lhs: lhs, rhs: [option_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: option_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: option_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: [option_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: option_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: option_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.nonempty_list? }
nonempty_list_token = Lrama::Lexer::Token::Ident.new(s_value: "nonempty_list_#{rhs[0].s_value}")
rules << Rule.new(id: nil, lhs: lhs, rhs: [nonempty_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: nonempty_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: nonempty_list_token, rhs: [nonempty_list_token, token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: [nonempty_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: nonempty_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: nonempty_list_token, rhs: [nonempty_list_token, token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.list? }
list_token = Lrama::Lexer::Token::Ident.new(s_value: "list_#{rhs[0].s_value}")
rules << Rule.new(id: nil, lhs: lhs, rhs: [list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: list_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: list_token, rhs: [list_token, token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: [list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: list_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: list_token, rhs: [list_token, token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.separated_nonempty_list? }
separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_nonempty_list_#{rhs[0].s_value}")
rules << Rule.new(id: nil, lhs: lhs, rhs: [separated_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: separated_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: separated_list_token, rhs: [separated_list_token, rhs[2], token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: [separated_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: separated_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: separated_list_token, rhs: [separated_list_token, rhs[2], token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.separated_list? }
separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_list_#{rhs[0].s_value}")
rules << Rule.new(id: nil, lhs: lhs, rhs: [separated_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: separated_list_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: separated_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: nil, lhs: separated_list_token, rhs: [separated_list_token, rhs[2], token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: lhs, rhs: [separated_list_token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: separated_list_token, rhs: [], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: separated_list_token, rhs: [token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
rules << Rule.new(id: @rule_counter.increment, lhs: separated_list_token, rhs: [separated_list_token, rhs[2], token], token_code: user_code, precedence_sym: precedence_sym, lineno: line)
end

rules
Expand Down
8 changes: 5 additions & 3 deletions lib/lrama/parser.rb

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 5 additions & 3 deletions parser.y
Original file line number Diff line number Diff line change
Expand Up @@ -312,12 +312,12 @@ rule
rhs: /* empty */
{
reset_precs
result = Grammar::RuleBuilder.new
result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
}
| "%empty"
{
reset_precs
result = Grammar::RuleBuilder.new
result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
}
| rhs symbol named_ref_opt
{
Expand Down Expand Up @@ -430,12 +430,14 @@ def initialize(text, path, debug = false)
@text = text
@path = path
@yydebug = debug
@rule_counter = Lrama::Grammar::Counter.new(0)
@midrule_action_counter = Lrama::Grammar::Counter.new(1)
end

def parse
report_duration(:parse) do
@lexer = Lrama::Lexer.new(@text)
@grammar = Lrama::Grammar.new
@grammar = Lrama::Grammar.new(@rule_counter)
@precedence_number = 0
reset_precs
do_parse
Expand Down
11 changes: 11 additions & 0 deletions sig/lrama/grammar/counter.rbs
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
module Lrama
class Grammar
# Type signature for Lrama::Grammar::Counter: a simple sequential
# integer counter with post-increment semantics.
class Counter
@number: Integer

# number is the first value #increment will return.
def initialize: (Integer number) -> void

# Returns the pre-increment value and advances @number by one.
def increment: () -> Integer
end
end
end
1 change: 1 addition & 0 deletions sig/lrama/grammar/rule_builder.rbs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ module Lrama
@user_code: Lexer::Token::UserCode?
@code_to_new_token: Hash[Lexer::Token, Lexer::Token]

def initialize: (Counter rule_counter, Counter midrule_action_counter) -> void
def add_rhs: (Lexer::Token rhs) -> void
def user_code=: (Lexer::Token::UserCode user_code) -> void
def precedence_sym=: (Lexer::Token user_code) -> void
Expand Down
6 changes: 3 additions & 3 deletions spec/lrama/grammar/rule_builder_spec.rb
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
RSpec.describe Lrama::Grammar::RuleBuilder do
let(:rule_builder) { Lrama::Grammar::RuleBuilder.new }
let(:rule_counter) { Lrama::Grammar::Counter.new(1) }
let(:midrule_action_counter) { Lrama::Grammar::Counter.new(1) }
let(:rule_builder) { Lrama::Grammar::RuleBuilder.new(rule_counter, midrule_action_counter) }

describe "#add_rhs" do
describe "@line" do
Expand Down Expand Up @@ -139,7 +141,6 @@
rule_builder.freeze_rhs

rule_builder.preprocess_references
rule_builder.extracted_action_number = 1
rules = rule_builder.midrule_action_rules

expect(rules.count).to eq 2
Expand Down Expand Up @@ -172,7 +173,6 @@
rule_builder.freeze_rhs

rule_builder.preprocess_references
rule_builder.extracted_action_number = 1
rule_builder.midrule_action_rules
tokens = rule_builder.rhs_with_new_tokens

Expand Down

0 comments on commit e9353bf

Please sign in to comment.