mirror of
https://github.com/sigmasternchen/xtext-core
synced 2025-03-15 08:18:55 +00:00
changed check for token-based path identity to be boundary test
+ changed default token limit to 10 + prepared test for symbolic identity analysis
This commit is contained in:
parent
cd0629d828
commit
58e3bba502
2 changed files with 28 additions and 9 deletions
|
@@ -278,10 +278,11 @@ public class HoistingProcessorTest extends AbstractXtextTests {
|
|||
public void testAlternativeIdenticalPaths() throws Exception {
|
||||
// @formatter:off
|
||||
String model =
|
||||
// boundary check: the 10th token should be handled correctly
|
||||
MODEL_PREAMBLE +
|
||||
"S: {S} $$ p0 $$?=> 'a' \n" +
|
||||
" | {S} $$ p1 $$?=> 'a' \n" +
|
||||
" | {S} $$ p2 $$?=> 'b' ;";
|
||||
"S: {S} $$ p0 $$?=> 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' \n" +
|
||||
" | {S} $$ p1 $$?=> 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' \n" +
|
||||
" | {S} $$ p2 $$?=> 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'k' ;";
|
||||
// @formatter:off
|
||||
XtextResource resource = getResourceFromString(model);
|
||||
Grammar grammar = ((Grammar) resource.getContents().get(0));
|
||||
|
@@ -290,7 +291,7 @@ public class HoistingProcessorTest extends AbstractXtextTests {
|
|||
HoistingGuard guard = hoistingProcessor.findGuardForElement(rule.getAlternatives());
|
||||
assertFalse(guard.isTrivial());
|
||||
assertTrue(guard.hasTerminal());
|
||||
assertEquals("((" + getSyntaxForKeywordToken("a", 1) + " || ((p0) || (p1))) && (" + getSyntaxForKeywordToken("b", 1) + " || (p2)))", guard.render());
|
||||
assertEquals("((" + getSyntaxForKeywordToken("j", 10) + " || ((p0) || (p1))) && (" + getSyntaxForKeywordToken("k", 10) + " || (p2)))", guard.render());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@@ -355,8 +356,6 @@ public class HoistingProcessorTest extends AbstractXtextTests {
|
|||
assertFalse(guard.isTrivial());
|
||||
assertTrue(guard.hasTerminal());
|
||||
|
||||
// order of paths in result depends on Set-order;
|
||||
// TODO: maybe change to List to make testing easier
|
||||
assertEquals("((((" + getSyntaxForKeywordToken("b", 2) + " || " + getSyntaxForKeywordToken("b", 3) + ") && (" + getSyntaxForKeywordToken("c", 2) + " || " + getSyntaxForKeywordToken("c", 3) + ")) || (p0)) && (((" + getSyntaxForKeywordToken("b", 2) + " || " + getSyntaxForKeywordToken("c", 3) + ") && (" + getSyntaxForKeywordToken("c", 2) + " || " + getSyntaxForKeywordToken("b", 3) + ")) || (p1)))", guard.render());
|
||||
}
|
||||
|
||||
|
@@ -404,6 +403,25 @@ public class HoistingProcessorTest extends AbstractXtextTests {
|
|||
AbstractRule rule = getRule(grammar, "S");
|
||||
|
||||
hoistingProcessor.findGuardForElement(rule.getAlternatives());
|
||||
|
||||
}
|
||||
|
||||
// symbolic analysis not yet implemented
|
||||
//@Test
|
||||
public void testAlternativesIdenticalPathsWithSymbolicAnalysis() throws Exception {
|
||||
// @formatter:off
|
||||
String model =
|
||||
MODEL_PREAMBLE +
|
||||
"S: {S} $$ p0 $$?=> C 'z' 'y' 'x' \n" +
|
||||
" | {S} $$ p1 $$?=> C 'z' 'y' 'x' ;\n" +
|
||||
"C: {C} ('a')* ;";
|
||||
// @formatter:off
|
||||
XtextResource resource = getResourceFromString(model);
|
||||
Grammar grammar = ((Grammar) resource.getContents().get(0));
|
||||
AbstractRule rule = getRule(grammar, "S");
|
||||
|
||||
HoistingGuard guard = hoistingProcessor.findGuardForElement(rule.getAlternatives());
|
||||
assertFalse(guard.isTrivial());
|
||||
assertTrue(guard.hasTerminal());
|
||||
assertEquals("((p0) || (p1))", guard.render());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -57,7 +57,7 @@ public class HoistingProcessor {
|
|||
|
||||
private Logger log = Logger.getLogger(this.getClass());
|
||||
|
||||
private static final int TOKEN_ANALYSIS_LIMIT = 5;
|
||||
private static final int TOKEN_ANALYSIS_LIMIT = 10;
|
||||
|
||||
private boolean isParserRule(AbstractElement element) {
|
||||
return (element instanceof RuleCall) && (((RuleCall) element).getRule() instanceof ParserRule);
|
||||
|
@@ -241,7 +241,8 @@ public class HoistingProcessor {
|
|||
}
|
||||
|
||||
private boolean arePathsIdenticalFallback(AbstractElement path1, AbstractElement path2) {
|
||||
for (int i = 0; i < TOKEN_ANALYSIS_LIMIT; i++) {
|
||||
// + 1, because otherwise identical paths of length TOKEN_ANALYSIS_LIMIT can't be checked
|
||||
for (int i = 0; i < TOKEN_ANALYSIS_LIMIT + 1; i++) {
|
||||
Set<List<Token>> tokenListSet1;
|
||||
Set<List<Token>> tokenListSet2;
|
||||
|
||||
|
|
Loading…
Reference in a new issue