Fixes two pump.py bugs. One of them ("$range 1..n $$ comment" doesn't parse) was reported by user Aksai Chin, who also contributed a patch. I didn't look at that patch, as I didn't want to bother him with signing the CLA; instead I wrote the fix from scratch.
parent 82cc1d1135
commit fe25aea971
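The heart of the fix, per the hunks below, is that `$$` meta comments are no longer handled by the tokenizer at all; they are stripped from the source text up front by the new StripMetaComments helper. A minimal sketch of what that helper does to the kind of input named in the report (the helper body is copied from the diff, but the example strings are illustrative, not taken from the patch):

import re


def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""
  # (Body copied from the diff below.)
  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.
  str = re.sub(r'^\s*\$\$.*\n', '', str)
  # Then, remove meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', str)


# The shape of input that used to break parsing: a directive followed by
# a $$ comment on the same line.  (Example text is made up.)
assert (StripMetaComments('$range i 1..n  $$ i is the parameter count\n')
        == '$range i 1..n\n')

# A line that is nothing but a meta comment disappears entirely.
assert StripMetaComments('$$ A whole-line meta comment.\nFoo();\n') == 'Foo();\n'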
@@ -29,7 +29,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""pump v0.1 - Pretty Useful for Meta Programming.
+"""pump v0.2.0 - Pretty Useful for Meta Programming.
 
 A tool for preprocessor meta programming. Useful for generating
 repetitive boilerplate code. Especially useful for writing C++
@@ -78,7 +78,6 @@ TOKEN_TABLE = [
     (re.compile(r'\$range\s+'), '$range'),
     (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
     (re.compile(r'\$\(\$\)'), '$($)'),
-    (re.compile(r'\$\$.*'), '$$'),
     (re.compile(r'\$'), '$'),
     (re.compile(r'\[\[\n?'), '[['),
     (re.compile(r'\]\]\n?'), ']]'),
@@ -224,6 +223,17 @@ def SubString(lines, start, end):
   return ''.join(result_lines)
 
 
+def StripMetaComments(str):
+  """Strip meta comments from each line in the given string."""
+
+  # First, completely remove lines containing nothing but a meta
+  # comment, including the trailing \n.
+  str = re.sub(r'^\s*\$\$.*\n', '', str)
+
+  # Then, remove meta comments from contentful lines.
+  return re.sub(r'\s*\$\$.*', '', str)
+
+
 def MakeToken(lines, start, end, token_type):
   """Creates a new instance of Token."""
 
@@ -311,11 +321,7 @@ def TokenizeLines(lines, pos):
       prev_token = MakeToken(lines, pos, found.start, 'code')
       prev_token_rstripped = RStripNewLineFromToken(prev_token)
 
-    if found.token_type == '$$':  # A meta comment.
-      if prev_token_rstripped:
-        yield prev_token_rstripped
-      pos = Cursor(found.end.line + 1, 0)
-    elif found.token_type == '$var':
+    if found.token_type == '$var':
       if prev_token_rstripped:
         yield prev_token_rstripped
       yield found
@@ -374,8 +380,11 @@ def TokenizeLines(lines, pos):
 
 
 def Tokenize(s):
-  lines = s.splitlines(True)
-  return TokenizeLines(lines, Cursor(0, 0))
+  """A generator that yields the tokens in the given string."""
+  if s != '':
+    lines = s.splitlines(True)
+    for token in TokenizeLines(lines, Cursor(0, 0)):
+      yield token
 
 
 class CodeNode:
@@ -565,11 +574,9 @@ def ParseCodeNode(tokens):
   return CodeNode(atomic_code_list)
 
 
-def Convert(file_path):
-  s = file(file_path, 'r').read()
-  tokens = []
-  for token in Tokenize(s):
-    tokens.append(token)
+def ParseToAST(pump_src_text):
+  """Convert the given Pump source text into an AST."""
+  tokens = list(Tokenize(pump_src_text))
   code_node = ParseCodeNode(tokens)
   return code_node
 
@@ -805,16 +812,21 @@ def BeautifyCode(string):
   return '\n'.join(output2) + '\n'
 
 
+def ConvertFromPumpSource(src_text):
+  """Return the text generated from the given Pump source text."""
+  ast = ParseToAST(StripMetaComments(src_text))
+  output = Output()
+  RunCode(Env(), ast, output)
+  return BeautifyCode(output.string)
+
+
 def main(argv):
   if len(argv) == 1:
     print __doc__
     sys.exit(1)
 
   file_path = argv[-1]
-  ast = Convert(file_path)
-  output = Output()
-  RunCode(Env(), ast, output)
-  output_str = BeautifyCode(output.string)
+  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
   if file_path.endswith('.pump'):
     output_file_path = file_path[:-5]
   else:
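For orientation, a rough usage sketch of the refactored entry points: the old Convert(file_path) is split into ParseToAST (text to AST) and ConvertFromPumpSource (text to generated output). The module import and the 'example.pump' file name are assumptions for illustration only; the script itself is still Python 2 era code, hence file() and the print statement in main().

# Illustrative driver, mirroring what main() now does; not part of the patch.
import pump  # assumes pump.py is importable as a module

src_text = file('example.pump', 'r').read()    # 'example.pump' is a made-up name
# ConvertFromPumpSource = StripMetaComments -> ParseToAST -> RunCode -> BeautifyCode
generated = pump.ConvertFromPumpSource(src_text)
# Write alongside the input, following the '.pump'-suffix-stripping convention in main().
file('example', 'w').write(generated)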