# This file is part of python-ly, https://pypi.python.org/pypi/python-ly
#
# Copyright (c) 2008 - 2015 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Parses and tokenizes Scheme input.
"""
from __future__ import unicode_literals
from . import _token
from . import Parser, FallthroughParser


class Scheme(_token.Token):
    """Base class for Scheme tokens."""
    pass


class String(_token.String):
    pass


class StringQuotedStart(String, _token.StringStart):
    """Matches the double quote that starts a string; switches to ParseString."""
    rx = r'"'
    def update_state(self, state):
        state.enter(ParseString())


class StringQuotedEnd(String, _token.StringEnd):
    """Matches the double quote that ends a string."""
    rx = r'"'
    def update_state(self, state):
        state.leave()
        state.endArgument()


class StringQuoteEscape(_token.Character):
    """Matches an escaped backslash or double quote inside a string."""
    rx = r'\\[\\"]'
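

# ParseScheme below lists LineComment and BlockCommentStart among its items,
# but they are not defined above.  The definitions here are a minimal sketch,
# assuming the usual comment base classes in _token and the Guile-style
# comment syntax LilyPond uses (";" line comments, "#! ... !#" block comments).
class Comment(_token.Comment):
    pass


class LineComment(Comment, _token.LineComment):
    rx = r";.*$"


class BlockCommentStart(Comment, _token.BlockCommentStart):
    rx = r"#!"
    def update_state(self, state):
        state.enter(ParseBlockComment())


class BlockCommentEnd(Comment, _token.BlockCommentEnd, _token.Leaver):
    rx = r"!#"


class BlockComment(Comment, _token.BlockComment):
    pass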


class OpenParen(Scheme, _token.MatchStart, _token.Indent):
    """Matches "(" and enters a nested ParseScheme."""
    rx = r"\("
    matchname = "schemeparen"
    def update_state(self, state):
        state.enter(ParseScheme())


class CloseParen(Scheme, _token.MatchEnd, _token.Dedent):
    """Matches ")" and leaves the current ParseScheme."""
    rx = r"\)"
    matchname = "schemeparen"
    def update_state(self, state):
        state.leave()
        state.endArgument()


class Quote(Scheme):
    """Matches a quote, quasiquote or unquote character (', ` or ,)."""
    rx = r"['`,]"


class Dot(Scheme):
    """Matches the dot in a dotted pair, e.g. (a . b)."""
    rx = r"\.(?!\S)"


class Bool(Scheme, _token.Item):
    """Matches the booleans #t and #f."""
    rx = r"#[tf]\b"


class Char(Scheme, _token.Item):
    """Matches a character literal such as #\\a or #\\space."""
    rx = r"#\\([a-z]+|.)"


class Word(Scheme, _token.Item):
    """Matches any other word (symbol or identifier)."""
    rx = r'[^()"{}\s]+'


# The following Word subclasses use test_match() to check the matched word
# against the word lists provided by the data module of the parent package.
class Keyword(Word):
    @classmethod
    def test_match(cls, match):
        from .. import data
        return match.group() in data.scheme_keywords()


class Function(Word):
    @classmethod
    def test_match(cls, match):
        from .. import data
        return match.group() in data.scheme_functions()


class Variable(Word):
    @classmethod
    def test_match(cls, match):
        from .. import data
        return match.group() in data.scheme_variables()


class Constant(Word):
    @classmethod
    def test_match(cls, match):
        from .. import data
        return match.group() in data.scheme_constants()


class Number(_token.Item, _token.Numeric):
    """Matches an integer, a #b/#o/#x radix literal, or an inf/nan value."""
    rx = (r"("
          r"-?\d+|"
          r"#(b[0-1]+|o[0-7]+|x[0-9a-fA-F]+)|"
          r"[-+]inf.0|[-+]?nan.0"
          r")(?=$|[)\s])")


class Fraction(Number):
    """Matches an exact fraction such as 1/2."""
    rx = r"-?\d+/\d+(?=$|[)\s])"


class Float(Number):
    """Matches a decimal number such as 1.5 or .5."""
    rx = r"-?((\d+(\.\d*)|\.\d+)(E\d+)?)(?=$|[)\s])"


class VectorStart(OpenParen):
    """Matches "#(", which opens a vector; its contents are parsed as a list."""
    rx = r"#\("


class LilyPond(_token.Token):
    """Base class for embedded LilyPond tokens."""
    pass


class LilyPondStart(LilyPond, _token.MatchStart, _token.Indent):
    """Matches "#{", which starts embedded LilyPond music."""
    rx = r"#{"
    matchname = "schemelily"
    def update_state(self, state):
        state.enter(ParseLilyPond())


class LilyPondEnd(LilyPond, _token.Leaver, _token.MatchEnd, _token.Dedent):
    """Matches "#}", which ends embedded LilyPond music."""
    rx = r"#}"
    matchname = "schemelily"


# Parsers

class ParseScheme(Parser):
    """Parses Scheme expressions."""
    mode = 'scheme'
    items = (
        _token.Space,
        OpenParen,
        CloseParen,
        LineComment,
        BlockCommentStart,
        LilyPondStart,
        VectorStart,
        Dot,
        Bool,
        Char,
        Quote,
        Fraction,
        Float,
        Number,
        Constant,
        Keyword,
        Function,
        Variable,
        Word,
        StringQuotedStart,
    )


class ParseString(Parser):
    """Parses the contents of a quoted string until StringQuotedEnd."""
    default = String
    items = (
        StringQuotedEnd,
        StringQuoteEscape,
    )
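

# A minimal sketch of the parser entered by the BlockCommentStart sketch
# above: everything is treated as BlockComment text until "!#" is seen,
# mirroring how ParseString handles quoted strings.
class ParseBlockComment(Parser):
    default = BlockComment
    items = (
        BlockCommentEnd,
    )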


from . import lilypond


class ParseLilyPond(lilypond.ParseMusic):
    """Parses LilyPond music between "#{" and "#}"."""
    items = (LilyPondEnd,) + lilypond.ParseMusic.items