Reimplemented getNextToken
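
A rough usage sketch of the PRESERVE/DROP distinction (the addTerminator(...) calls and the
no-argument Tokenizer constructor are illustrative assumptions and are not part of this hunk;
TokenizerMatch.token is the public field already used by consumeIfNextToken()):

    import static eu.svjatoslav.commons.string.tokenizer.Terminator.TerminationStrategy.DROP;
    import static eu.svjatoslav.commons.string.tokenizer.Terminator.TerminationStrategy.PRESERVE;

    Tokenizer tokenizer = new Tokenizer()      // assumed no-arg constructor
            .setSource("a = b;");
    tokenizer.addTerminator(" ", DROP);        // assumed registration helper
    tokenizer.addTerminator("=", PRESERVE);    // assumed registration helper
    tokenizer.addTerminator(";", PRESERVE);    // assumed registration helper

    // expected tokens: "a", "=", "b", ";"
    for (TokenizerMatch match = tokenizer.getNextToken(); match != null; match = tokenizer.getNextToken())
        System.out.println(match.token);
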
[svjatoslav_commons.git] / src/main/java/eu/svjatoslav/commons/string/tokenizer/Tokenizer.java
index c4ca4d2..e92ccd7 100755 (executable)
@@ -15,6 +15,7 @@ import java.util.Stack;
 import java.util.stream.Stream;
 
 import static eu.svjatoslav.commons.string.tokenizer.Terminator.TerminationStrategy.DROP;
+import static eu.svjatoslav.commons.string.tokenizer.Terminator.TerminationStrategy.PRESERVE;
 
 public class Tokenizer {
 
@@ -32,6 +33,7 @@ public class Tokenizer {
     public Tokenizer setSource(String source){
         this.source = source;
         currentIndex = 0;
+        tokenIndexes.clear();
         return this;
     }
 
@@ -57,46 +59,52 @@ public class Tokenizer {
 
     public TokenizerMatch getNextToken() {
         tokenIndexes.push(currentIndex);
-        final StringBuilder result = new StringBuilder();
-
-        while (true) {
-            if (currentIndex >= source.length())
-                return null;
-
-            boolean accumulateCurrentChar = true;
-
-            for (final Terminator terminator : terminators)
-                if (sequenceMatches(terminator.startSequence))
-
-                    if (terminator.termination == DROP) {
-                        currentIndex += terminator.startSequence.length();
-
-                        if (terminator.endSequence != null)
-                            skipUntilSequence(terminator.endSequence);
-
-                        if (result.length() > 0)
-                            return new TokenizerMatch(result.toString(),
-                                    terminator);
-                        else {
-                            accumulateCurrentChar = false;
-                            break;
-                        }
-                    } else if (result.length() > 0)
-                        return new TokenizerMatch(result.toString(), terminator);
-                    else {
-                        currentIndex += terminator.startSequence.length();
-                        return new TokenizerMatch(terminator.startSequence,
-                                terminator);
-                    }
 
-            if (accumulateCurrentChar) {
-                result.append(source.charAt(currentIndex));
+        StringBuilder token = new StringBuilder();
+
+        while (true){
+            // stop once the end of the source is reached
+            if (currentIndex >= source.length())
+                return null;
+
+            if (isTokenTermination()){
+                Terminator tokenTerminator = findTokenTerminator();
+
+                if (tokenTerminator.termination == PRESERVE){
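+                    // PRESERVE: the terminator text is significant and is itself returned as a token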
+                    if (hasAccumulatedToken(token)){
+                        // a token was already accumulated; return it and leave the terminator for the next call
+                        return new TokenizerMatch(token.toString(), "", tokenTerminator);
+                    } else {
+                        currentIndex += tokenTerminator.startSequence.length();
+                        return new TokenizerMatch(tokenTerminator.startSequence, "", tokenTerminator);
+                    }
+                } else if (tokenTerminator.termination == DROP){
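+                    // DROP: the terminator is consumed and never returned as a token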
+                    // consume the whole start sequence, then skip to its end sequence if one is defined
+                    currentIndex += tokenTerminator.startSequence.length();
+                    if (tokenTerminator.endSequence != null)
+                        skipUntilSequence(tokenTerminator.endSequence);
+
+                    if (hasAccumulatedToken(token))
+                        return new TokenizerMatch(token.toString(), "", tokenTerminator);
+                }
+            } else {
+                token.append(source.charAt(currentIndex));
                 currentIndex++;
             }
         }
 
     }
 
+    private boolean hasAccumulatedToken(StringBuilder token) {
+        return token.length() > 0;
+    }
+
+    private boolean isTokenTermination() {
+        return findTokenTerminator() != null;
+    }
+
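+    /** Finds the first registered terminator matching at the current position, or null if none match. */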
+    public Terminator findTokenTerminator() {
+        for (Terminator terminator : terminators)
+            if (terminator.matches(source, currentIndex))
+                return terminator;
+        return null;
+    }
+
     public boolean consumeIfNextToken(final String token) {
         if (token.equals(getNextToken().token))
             return true;