Commit 1557ab76 authored by matthew-eads
Browse files

executable produces (mostly) valid parser and lexer

parent 59b86cc4
......@@ -6,17 +6,17 @@ data Prog = Prog [AST]
data AST = Expression Exp
| TopLevlDec Dec
data Exp = SET ID Exp
| IF SimpleExp Exp (Maybe Exp)
data Exp = SET ID Exp
| IF SimpleExp Exp (Maybe Exp) --> if
| WHILE SimpleExp Exp
| BEGIN (List1 Exp)
| LET [(String, Exp)] Exp
data BinOp = Plus SimpleExp SimpleExp
data BinOp = Plus SimpleExp SimpleExp
| Mult SimpleExp SimpleExp
| Div SimpleExp SimpleExp
| Minus SimpleExp SimpleExp
| CheckExpect SimpleExp SimpleExp
| CheckExpect SimpleExp SimpleExp
data SimpleExp =
Lit Int
......
This diff is collapsed.
-- Happy grammar for the Imp example language (generated output checked
-- in alongside the generator tool).
{
module Main where
import Lexer
}
-- Entry point `imp` parses a token stream into a Prog.
%name imp
%tokentype {Token}
%token
new { TokenBuiltin $$}
"check-expect" { TokenOp $$ }
"-" { TokenOp $$ }
"/" { TokenOp $$ }
"*" { TokenOp $$ }
"+" { TokenOp $$ }
if { TokenIf }
while { TokenWhile }
begin { TokenBegin }
set { TokenSet }
"(" { TokenLParen }
")" { TokenRParen }
int { TokenInt $$ }
var { TokenVar $$ }
String {TokenVar $$ }
Int {TokenInt $$}
ID {TokenVar $$ }
"@" { TokenOp $$ }
"!" { TokenOp $$ }
"$" { TokenOp $$ }
"%" { TokenOp $$ }
"^" { TokenOp $$ }
"&" { TokenOp $$ }
"_" { TokenOp $$ }
"`" { TokenOp $$ }
"~" { TokenOp $$ }
"=" { TokenOp $$ }
"[" { TokenOp $$ }
"]" { TokenOp $$ }
"{" { TokenOp $$ }
"}" { TokenOp $$ }
":" { TokenOp $$ }
";" { TokenOp $$ }
"<" { TokenOp $$ }
">" { TokenOp $$ }
"Plus" { TokenOp $$ }
"Mult" { TokenOp $$ }
"Div" { TokenOp $$ }
"Minus" { TokenOp $$ }
"CheckExpect" { TokenOp $$ }
Potato { TokenOp "potato" }
%error {parseError}
%%
-- Helper rules: Multi0X is zero-or-more X, Multi1X is one-or-more X.
Multi0AST: AST Multi0AST {$1:$2}
| {[]}
Multi1Exp: Exp Multi1Exp {$1:$2}
| Exp {[$1]}
ExpExp: Exp Exp {($1, $2)}
MaybePotato: Potato {Just $1}
| {Nothing}
StringExp: String Exp {($1, $2)}
Multi0StringExp: StringExp Multi0StringExp {$1:$2}
| {[]}
Multi0Exp: Exp Multi0Exp {$1:$2}
| {[]}
-- One rule per AST data declaration.
Prog: Multi0AST {Prog $1}
AST: Exp {Expression $1}
| Dec {TopLevlDec $1}
Exp: ID Exp {SET $1 $2}
| SimpleExp Exp Exp {IF $1 $2 $3}
| SimpleExp Exp {WHILE $1 $2}
| Multi1Exp {BEGIN $1}
| ExpExp MaybePotato {TUPY $1 $2}
| Multi0StringExp {LET $1}
-- BUGFIX: $1 is the literal constructor-name token here, so the two
-- SimpleExp payloads are $2 and $3 (the original {Plus $1 $2} applied
-- the constructor to the token string, an ill-typed action; the updated
-- generator emits $2/$3 via nums1).
BinOp: "Plus" SimpleExp SimpleExp {Plus $2 $3}
| "Mult" SimpleExp SimpleExp {Mult $2 $3}
| "Div" SimpleExp SimpleExp {Div $2 $3}
| "Minus" SimpleExp SimpleExp {Minus $2 $3}
| "CheckExpect" SimpleExp SimpleExp {CheckExpect $2 $3}
SimpleExp: Int {Lit $1}
| ID {Var $1}
| ID Multi0Exp {Apply $1 $2}
| BinOp {BinOp $1}
| SimpleExp SimpleExp {AGet $1 $2}
| SimpleExp SimpleExp SimpleExp {ASet $1 $2 $3}
| ID SimpleExp {AMake $1 $2}
Dec: ID Exp {Val $1 $2}
{
-- Trailer: the AST declarations the grammar builds, plus a stub main.
type ID = String
type Potato = String
data Prog = Prog [AST]
data AST = Expression Exp
  | TopLevlDec Dec
data Exp = SET ID Exp
  | IF SimpleExp Exp Exp
  | WHILE SimpleExp Exp
  | BEGIN (List1 Exp)
  | TUPY (Exp, Exp) (Maybe Potato)
  | LET [(String, Exp)]
data BinOp = Plus SimpleExp SimpleExp
  | Mult SimpleExp SimpleExp
  | Div SimpleExp SimpleExp
  | Minus SimpleExp SimpleExp
  | CheckExpect SimpleExp SimpleExp
data SimpleExp =
  Lit Int
  | Var ID
  | Apply ID (List0 Exp)
  | BinOp BinOp
  | AGet SimpleExp SimpleExp --(arr[i])
  | ASet SimpleExp SimpleExp SimpleExp --(set arr[i] j)
  | AMake ID SimpleExp --(new arr size)
data Dec = Val ID Exp
main = do {
  putStrLn "hello";
}
}
-- Example AST declarations (sample input for the parser generator).
data Stmt = IF Conditional Stmt Stmt -- if c e1 e2 (returns unit type)
  | Set String Int -- x = y
  | Exp
data Conditional = Equal Exp Exp -- x = y
  | TernaryIf Conditional Exp Exp -- if c e1 e2 (returns e1 or e2)
data Exp = Var String
  | Lit Int
......@@ -16,14 +16,20 @@ import Text.Regex
import Data.List.Unique
import Data.List
import Data.List.Split
import Data.Char
-- A constructor directive harvested from a "-- >" source comment
-- (e.g. "if", "while"); "" when the constructor has none.
type Directive = String
-- Generated happy grammar text.
type Grammar = String

-- A data declaration: type name plus its constructor list.
-- ex: DecT "Exp" [RHS "IF" ["Exp", "Exp", "(Maybe Exp)"], ...]
data DecT = DecT String [RHS] deriving Show
-- One constructor: its name and the types of its fields.
-- ex: RHS "IF" ["Exp", "Exp", "(Maybe Exp)"]
data RHS = RHS String [String] deriving Show
-- DecT/RHS enriched with each constructor's Directive.
data DecT' = DecT' String [RHS'] deriving Show
data RHS' = RHS' String [String] Directive deriving Show

-- Template Haskell name for the List0 wrapper type.
list0 :: Name
list0 = mkName "List0"
list1 :: Name
......@@ -100,9 +106,9 @@ showRHS (RHS n types) =
-- Indicates if the given RHS construction is unique in the entire
-- data declaration. data A = X E | Y E E: X,Y are unique, not so
-- in data A = X E | Y E
isUnique :: [RHS] -> RHS -> Bool
isUnique rules (RHS n types) =
foldr (\(RHS name ts) acc -> acc && (name == n ||ts /= types)) True rules
-- | True when no other constructor in the declaration has exactly the
-- same field-type list (entries carrying this constructor's own name
-- are ignored, so a constructor never conflicts with itself).
isUnique :: [RHS'] -> RHS' -> Bool
isUnique rules (RHS' ctor fieldTys _) =
    all distinctFrom rules
  where
    distinctFrom (RHS' otherCtor otherTys _) =
        otherCtor == ctor || otherTys /= fieldTys
-- | Join two strings with a single space between them.
myappend :: String -> String -> String
myappend left right = concat [left, " ", right]
......@@ -113,9 +119,16 @@ makeNums _ 0 _ = ""
makeNums i 1 _ = "$" ++ (show i)
makeNums i n sep = "$" ++ (show (i-n+1)) ++ sep ++ (makeNums i (n-1) sep)
-- | Render happy positional arguments from explicit positions:
-- [1,3] -> "$1 $3 " (note the trailing space after every element).
makeNumsL :: [Int] -> String
makeNumsL = concatMap (\k -> '$' : show k ++ " ")
-- produces the construction -> {EXP $1 $2} from the given RHS rule
rrhs :: RHS -> String
rrhs (RHS s ts) = "\t{" ++ s ++ " " ++ (makeNums (length ts) (length ts) " ") ++ "}"
-- | Render the happy action for a constructor with consecutive
-- positional arguments, e.g. "\t{IF $1 $2 $3}".
rrhs :: RHS' -> String
rrhs (RHS' ctor fieldTys _) =
    "\t{" ++ ctor ++ " " ++ args ++ "}"
  where
    arity = length fieldTys
    args  = makeNums arity arity " "
-- | Like 'rrhs' but with explicit argument positions, e.g.
-- rrhsL "IF" [2,4,6] -> "\t{IF $2 $4 $6 }" (trailing space comes
-- from 'makeNumsL').
rrhsL :: String -> [Int] -> String
rrhsL ctor positions = concat ["\t{", ctor, " ", makeNumsL positions, "}"]
isMulti :: String -> Bool
isMulti "*" = True
......@@ -138,13 +151,35 @@ fixMultis s = (if isMulti1 s then "Multi1"
else if isMulti0 s then "Multi0"
else "") ++ (strip s)
rhsToRule :: [RHS] -> RHS -> (String, [(String, String)])
rhsToRule rules = \(RHS s ts) ->
let ts' = (map fixMultis ts) in
((if (isUnique rules (RHS s ts))
then foldr myappend "" ts'
else "\"" ++ s ++ "\" " ++ (foldr myappend "" ts')) ++ rrhs (RHS s ts'), [])
-- | Argument positions [1..n] for a rule with no leading literal token.
-- Replaces the original quadratic snoc-recursion (nums0 (n-1) ++ [n])
-- with a direct enumeration; also terminates (returning []) on negative
-- input instead of recursing forever.
nums0 :: Int -> [Int]
nums0 n = [1 .. n]
-- | Argument positions [2..n+1] for a rule whose first symbol is a
-- literal token (the payload arguments start at $2). Direct enumeration
-- replaces the quadratic snoc-recursion and terminates on negative n.
nums1 :: Int -> [Int]
nums1 n = [2 .. n + 1]
-- | Turn one constructor into the happy grammar fragment that parses
-- it, steered by the constructor's directive.  Returns the rule text
-- plus any literal tokens (constructor names) the lexer must learn.
rhsToRule :: [RHS'] -> RHS' -> (String, [String])
rhsToRule rules =
  \(RHS' s ts d) ->
  -- Rewrite List0/List1 field types to their Multi0*/Multi1* rule names.
  let ts' = (map fixMultis ts)
  in
  (case d of
     -- No directive: if the field-type list is unique within the
     -- declaration the constructor name is not needed in the concrete
     -- syntax; otherwise it is emitted as a leading literal token (and
     -- reported in the snd component so the lexer learns it), shifting
     -- the payload positions to start at $2 (nums1).
     "" -> if (isUnique rules (RHS' s ts d))
           then ((foldr myappend "" ts') ++ (rrhsL s (nums0 (length ts))), [])
           else ("\"" ++ s ++ "\" " ++ (foldr myappend "" ts') ++ (rrhsL s (nums1 (length ts))),
                 [s])
     -- "if" directive: concrete if/then(/else) syntax.  The payload
     -- positions skip the keyword tokens, hence [2,4] / [2,4,6].
     -- NOTE(review): these branches use the raw ts, not ts', so
     -- fixMultis is skipped here -- confirm that is intentional.
     "if" -> if length ts == 2
             then ("\"if\" " ++ (head ts) ++ " \"then\" " ++ (head (tail ts))
                   ++ (rrhsL s [2,4]), [])
             else if length ts == 3
                  then ("\"if\" " ++ (head ts) ++ " \"then\" " ++ (head (tail ts))
                        ++ " \"else\" " ++ (head (tail (tail ts))) ++ (rrhsL s [2,4,6]),
                        [])
                  else error "bad number of constructions for if rule"
     -- "while" directive: while (<cond>) { <body> } syntax.
     -- NOTE(review): assumes exactly two fields (head/tail are partial
     -- here, unlike the guarded "if" branch), and there is no space
     -- between "(" and the condition symbol -- verify both.
     "while" -> ("\"while\" \"(\"" ++ (head ts) ++ " \")\" \"{\" " ++ (head (tail ts))
                 ++ " \"}\"" ++ (rrhsL s [3, 6]), [])
     bad -> error ("directive " ++ bad ++ " not supported"))
strip :: String -> String
strip ('*':[]) = []
......@@ -152,6 +187,10 @@ strip ('+':[]) = []
strip (s:r) = s:(strip r)
strip [] = []
-- | Trim spaces, tabs, carriage returns and newlines from both ends of
-- a string (interior whitespace is preserved).
cleanWS :: String -> String
cleanWS = dropWhileEnd isBlank . dropWhile isBlank
  where isBlank c = c == ' ' || c == '\t' || c == '\n' || c == '\r'
makeMultiRules :: Type -> String -> String
makeMultiRules (AppT (ConT listt) _) s =
let i = if listt == list0 then 0 else 1
......@@ -174,8 +213,8 @@ makeTab s = let helper 0 a = a
-- converts the given DecT declaration to a grammar rule which
-- will in turn produce the given declaration.
decTToRule :: DecT -> (String, [(String,String)])
decTToRule (DecT n rhs) =
decTToRule :: DecT' -> (String, [String])
decTToRule (DecT' n rhs) =
let (rhsStrings, tokes) = unzip (map (rhsToRule rhs) rhs)
tab = makeTab n
tokes' = foldr (++) [] tokes
......@@ -183,7 +222,7 @@ decTToRule (DecT n rhs) =
(foldr (\r acc -> tab ++ "| " ++ r ++ "\n" ++ acc) "" (tail rhsStrings)), tokes')
-- Creates the grammar rules from the declarations
makeRules :: [DecT] -> (String, [(String, String)])
-- | Build the grammar rules for all declarations, concatenating the
-- rule text (newline-separated) and collecting every literal token the
-- rules introduced.
makeRules :: [DecT'] -> (String, [String])
makeRules = foldr combine ("", [])
  where
    combine dec (rulesAcc, tokenAcc) =
      let (rule, newTokens) = decTToRule dec
      in (rule ++ "\n" ++ rulesAcc, tokenAcc ++ newTokens)
......@@ -220,15 +259,97 @@ cleanMulti s =
(removeDups dups rest True)
in (removeDups dups' s False)
parseAstDecs :: String -> Grammar
parseAstDecs s = let decs = case parseDecs s of
(Right ds) -> ds
(Left err) -> (trace err undefined)
dataDs = filter isDataD decs
decTs = (map dataDToDecT dataDs)
(rules, tokes) = makeRules (map (\(x,_) -> x) decTs)
in cleanNLs (cleanMulti (foldr (\(_, y) acc -> y ++ acc) "" decTs)) ++ rules
-- in (foldr (\(x, y) acc -> y ++ acc) "" decTs) ++ rules
-- | Walk the raw declaration text in step with the parsed constructors,
-- attaching to each RHS the "-- >" directive found in its source slice.
-- The text is consumed one '|'-separated alternative at a time.
addDirectiveToRHS :: String -> [RHS] -> [RHS']
addDirectiveToRHS s ((RHS n cs):rs) =
  -- Grab everything up to the next '|' (newlines allowed) as this
  -- constructor's slice; the remainder is passed to the recursion.
  let (rhs, rest) = case matchRegexAll (mkRegex "([^|]|[\n\r])*") s of
        Just (_, match, after, _) -> (trace {-("\nfound rhs: " ++ match ++ "\n and rest: " ++ after)-} "" (match, after))
        Nothing -> (trace ("\nno match in finding rhs: " ++ s) (s, ""))
      -- A directive is a "-- >" comment: drop the 4-char marker, trim.
      directive = case matchRegexAll (mkRegex "-- >.*") rhs of
        Nothing -> ""
        Just (_, match, _, _) -> cleanWS (drop 4 match)
  -- `tail rest` skips the '|' separator itself before recursing.
  in (RHS' n cs directive):(addDirectiveToRHS (if null rest then [] else tail rest) rs)
addDirectiveToRHS _ [] = []
-- | Attach directives to every constructor of one declaration, using
-- the slice of source text that belongs to that declaration.
addDirectiveToDecT :: String -> DecT -> DecT'
addDirectiveToDecT s (DecT n rhs) = DecT' n (addDirectiveToRHS s rhs)
-- DecT' n (map (addDirectiveToRHS s) rhs)
-- | Pair each parsed declaration with its slice of the source text by
-- splitting on the "data" keyword; each slice carries the "-- >"
-- directives for the declaration that was parsed from it.
-- NOTE(review): assumes declarations appear in the same order as the
-- "data" occurrences in the text -- confirm against dataDToDecT.
addDirectives :: [DecT] -> String -> [DecT']
addDirectives (d:ds) s =
  case matchRegexAll (mkRegex "data") s of
    -- Text before the next "data" belongs to the current declaration.
    Just (before, _, after, _) -> (addDirectiveToDecT before d):(addDirectives ds after)
    Nothing -> [(addDirectiveToDecT s d)]
addDirectives [] _ = []
-- | Prepend every directive of a declaration (one per line, followed by
-- a blank line) onto the front of an accumulator string.
getDirectives :: DecT' -> String -> String
getDirectives (DecT' _ rhss) acc =
    concatMap directiveLine rhss ++ "\n" ++ acc
  where
    directiveLine (RHS' _ _ d) = d ++ "\n"
-- | Skip text inside a {- ... -} block comment; the Int is the current
-- nesting depth.  Closing the outermost comment hands control back to
-- 'cleanComments'.  Pattern order matters: close is checked before open.
inComment :: String -> Int -> String
inComment ('-':'}':s) 1 = cleanComments s
inComment ('-':'}':s) n = inComment s (n-1)
inComment ('{':'-':s) n = inComment s (n+1)
inComment (_:s) n = inComment s n
inComment [] _ = []
-- | Strip Haskell comments from source text, except that "-->" comments
-- are kept (normalised to "-- >") so directives survive for the later
-- 'addDirectives' pass.  Block comments defer to 'inComment'.
cleanComments :: String -> String
cleanComments ('{':'-':s) = inComment s 1
cleanComments ('-':'-':'>':s) = "-- >" ++ (cleanComments s)
cleanComments ('-':'-':s) = cleanComments (dropWhile (\x -> (x /= '\r') && (x /= '\n')) s)
cleanComments (x:s) = x:(cleanComments s)
cleanComments [] = []
-- | Drop everything up to and including the first occurrence of "data";
-- if no "data" appears, return the input unchanged.
cleanHead :: String -> String
cleanHead input =
  maybe input (\(_, _, rest, _) -> rest) (matchRegexAll (mkRegex "data") input)
-- | Parse the AST declaration source, generate a happy parser and an
-- alex lexer for it, and write them to <Name>Parser.y / <Name>Lexer.x,
-- where <Name> is the input file name up to its first '.'.
parseAstDecs :: String -> String -> IO ()
parseAstDecs s name =
  let s' = cleanComments s
      -- Parse the comment-stripped declarations; abort (via trace +
      -- undefined) on a parse error.
      decs = case parseDecs s' of
        (Right ds) -> ds
        (Left err) -> (trace err undefined)
      dataDs = filter isDataD decs
      decTs = (map dataDToDecT dataDs)
      -- Re-attach the "-- >" directives from the original text.
      directives = addDirectives (map (\(x,_) -> x) decTs) (cleanHead s')
      -- dstring = foldr getDirectives "\n" directives
      (rules, tokes) = makeRules directives
      grammarS = cleanNLs (cleanMulti
                 (foldr (\(_, y) acc -> y ++ acc) "" decTs)) ++ rules
      -- Base name for the two output files.
      name' = (head (splitOn "." name))
      -- nub de-duplicates the literal tokens the rules introduced.
      parser = makeParser name' (nub tokes) grammarS s'
      lexer = makeLexer name' (nub tokes)
  in do {writeFile (name' ++ "Parser.y") parser; writeFile (name' ++ "Lexer.x") lexer;
         return ();}
-- in (foldr (\(x, y) acc -> y ++ acc) "" decTs) ++ rules
-- | Assemble the happy parser source: module header, %token section
-- (the learned literal tokens plus a fixed base set), the generated
-- grammar, and the original AST declarations as the trailer.
-- NOTE(review): toUpper (head t) assumes token names are non-empty.
makeParser :: String -> [String] -> String -> String -> String
makeParser name tokes grammar ast =
  -- One %token line per learned literal: "tok" { TokenTok }.
  let tokenRules = cleanNLs $ foldr (\t a -> "\"" ++ t ++ "\""
                     ++ " { " ++ "Token" ++ ((toUpper (head t)):(tail t)) ++
                     " }\n" ++ a) "" tokes
  in
  "{\nmodule " ++ name ++ "Parser where\nimport " ++ name ++ "Lexer\n}\n%name " ++ (map toLower name) ++
  "\n%tokentype { Token }\n%token\n" ++ tokenRules ++ "Int { TokenInt $$ }\nID { TokenID $$ }\nset { TokenSet }\nif { TokenIf}\nplus { TokenPlus}\nwhile { TokenWhile }\nbegin { TokenBegin }\nString { TokenString $$ }\neof { TokenEOF }\nnl { TokenNL }\n%error {parseError}\n\n%%\n\n" ++ grammar ++
  "\n{\n" ++ ast ++ "\nparseError :: [Token] -> a\nparseError t = error (\"Parse Error on token(s) \" ++ (show t))\n\ntype ID = String\n}\n"
-- | Assemble the alex lexer source: one rule and one Token constructor
-- per learned literal token, plus a large fixed set of keyword and
-- punctuation rules and the lexer driver code.
-- NOTE(review): toUpper (head t) assumes token names are non-empty.
makeLexer :: String -> [String] -> String
makeLexer name tokes =
  -- One alex rule per learned literal: <0> "tok" { ... TokenTok ... }.
  let tokenRules = cleanNLs $ foldr (\t a ->
                     "<0> " ++ '"':t ++ "\"\t{ \\(pos,_,_,inp) len -> return Token"
                     ++ ((toUpper (head t)):(tail t)) ++ " :: Alex Token }\n" ++ a) "" tokes
      -- One extra Token constructor per learned literal.
      dataRules = cleanNLs $ foldr (\t a -> "\t| Token" ++ ((toUpper (head t)):(tail t))
                     ++ "\n" ++ a) "" tokes
  in ("{\nmodule " ++ name ++ "Lexer (runLexer, Token (..)) where\nimport Debug.Trace\n}\n\n%wrapper \"monad\"\n\n$spaces = [\\ \\t]\n$alpha = [a-zA-Z]\n$digits = [0-9]\n$alnum = [$alpha$digits]\n$alnumplus = [$alnum\\_\\-]\n$nl = [\\n\\r]\n\n@identifier = $alpha $alnumplus*\n@comment = \\-\\-.*\n@integer = $digits+\n@string = \\\"[^\\\"]*\\\"\n\n\n:-\n\n<0> @integer { \\(pos,_,_,inp) len -> return $ TokenInt (read (take len inp)) }\n<0> @string { \\(pos,_,_,inp) len -> return $ TokenString (take len inp) }\n\n<0> @identifier { \\(pos,_,_,inp) len -> return $ TokenID (take len inp) }\n\n"
      ++ tokenRules ++
      "<0> \"=\" { \\(pos,_,_,inp) len -> return $ TokenEq }\n<0> \"+\" { \\(pos,_,_,inp) len -> return $ TokenPlus }\n<0> \"-\" { \\(pos,_,_,inp) len -> return $ TokenDash }\n<0> \"_\" { \\(pos,_,_,inp) len -> return $ TokenUnderscore }\n<0> \"\\[\" { \\(pos,_,_,inp) len -> return $ TokenLBrack }\n<0> \"\\]\" { \\(pos,_,_,inp) len -> return $ TokenRBrack }\n<0> \";\" { \\(pos,_,_,inp) len -> return $ TokenSemiColon }\n<0> \">\" { \\(pos,_,_,inp) len -> return $ TokenGT }\n<0> \"(\" { \\(pos,_,_,inp) len -> return $ TokenLParen }\n<0> \")\" { \\(pos,_,_,inp) len -> return $ TokenRParen }\n<0> \"\n\" { \\(pos,_,_,inp) len -> return $ TokenNL }\n<0> set { \\(pos,_,_,inp) len -> return $ TokenSet :: Alex Token}\n<0> if { \\(pos,_,_,inp) len -> return $ TokenIf :: Alex Token }\n<0> then { \\(pos,_,_,inp) len -> return $ TokenThen :: Alex Token }\n<0> else { \\(pos,_,_,inp) len -> return $ TokenElse :: Alex Token }\n<0> while { \\(pos,_,_,inp) len -> return $ TokenWhile :: Alex Token }\n<0> \"<\" { \\(pos,_,_,inp) len -> return $ TokenLT :: Alex Token }\n<0> \"<=\" { \\(pos,_,_,inp) len -> return $ TokenLE :: Alex Token }\n<0> \">=\" { \\(pos,_,_,inp) len -> return $ TokenGE :: Alex Token }\n<0> \"!\" { \\(pos,_,_,inp) len -> return $ TokenBang :: Alex Token }\n<0> \"@\" { \\(pos,_,_,inp) len -> return $ TokenAt :: Alex Token }\n<0> \"#\" { \\(pos,_,_,inp) len -> return $ TokenPound :: Alex Token }\n<0> \"$\" { \\(pos,_,_,inp) len -> return $ TokenDollar :: Alex Token }\n<0> \"%\" { \\(pos,_,_,inp) len -> return $ TokenPercent :: Alex Token }\n<0> \"^\" { \\(pos,_,_,inp) len -> return $ TokenCarat :: Alex Token }\n<0> \"&\" { \\(pos,_,_,inp) len -> return $ TokenAmpersand :: Alex Token }\n<0> \"*\" { \\(pos,_,_,inp) len -> return $ TokenStar :: Alex Token }\n<0> \"{\" { \\(pos,_,_,inp) len -> return $ TokenLBrace :: Alex Token }\n<0> \"}\" { \\(pos,_,_,inp) len -> return $ TokenRBrace :: Alex Token }\n<0> \"(\" { \\(pos,_,_,inp) len -> return $ TokenLParen :: Alex Token }\n<0> \")\" { \\(pos,_,_,inp) len -> return $ TokenRParen :: Alex Token }\n<0> \"?\" { \\(pos,_,_,inp) len -> return $ TokenQuestion :: Alex Token }\n<0> \"/\" { \\(pos,_,_,inp) len -> return $ TokenFSlash :: Alex Token }\n<0> \"\\\" { \\(pos,_,_,inp) len -> return $ TokenBSlash :: Alex Token }\n<0> \":\" { \\(pos,_,_,inp) len -> return $ TokenColon :: Alex Token }\n<0> \"\\\"\" { \\(pos,_,_,inp) len -> return $ TokenDoubleQuote :: Alex Token }\n<0> \"'\" { \\(pos,_,_,inp) len -> return $ TokenSingleQuote :: Alex Token }\n<0> \",\" { \\(pos,_,_,inp) len -> return $ TokenComma :: Alex Token }\n<0> \".\" { \\(pos,_,_,inp) len -> return $ TokenPeriod :: Alex Token }\n<0> \"?\" { \\(pos,_,_,inp) len -> return $ TokenQuestion :: Alex Token }\n<0> \"~\" { \\(pos,_,_,inp) len -> return $ TokenTilda :: Alex Token }\n<0> \"`\" { \\(pos,_,_,inp) len -> return $ TokenTick :: Alex Token }\n<0> \"|\" { \\(pos,_,_,inp) len -> return $ TokenBar :: Alex Token }\n\n\n<0> @comment ;\n<0> [\\ \\t \\n]+ ;\n<0> \"/*\" { begin comment }\n<comment> \"*/\" { begin 0 }\n<comment> . ;\n<0> $white ;\n. { \\(pos,_,_,inp) len -> error \"bad token\" }\n\n{ \n\n\n\n\nalexEOF :: Alex Token\nalexEOF = return TokenEOF\n\nlexwrap = (alexMonadScan >>=)\n\ndata Token = TokenID String \n | TokenSet \n | TokenIf\n | TokenThen\n | TokenElse\n | TokenWhile\n | TokenBegin\n | TokenInt Int\n | TokenVar\n | TokenString String \n | TokenEOF \n | TokenNL \n | TokenLParen\n | TokenRParen\n | TokenLT\n | TokenGT\n | TokenLE\n | TokenGE\n | TokenBang\n | TokenAt\n | TokenPound\n | TokenDollar\n | TokenPercent\n | TokenCarat\n | TokenAmpersand\n | TokenStar\n | TokenDash\n | TokenUnderscore\n | TokenPlus\n | TokenEq\n | TokenLBrace\n | TokenRBrace\n | TokenLBrack\n | TokenRBrack\n | TokenFSlash\n | TokenBSlash\n | TokenSemiColon\n | TokenColon\n | TokenDoubleQuote\n | TokenSingleQuote\n | TokenComma\n | TokenPeriod\n | TokenQuestion\n | TokenTilda\n | TokenTick\n | TokenBar\n "
      ++ dataRules ++
      " deriving (Eq, Show)\n\ntokens str = runAlex str $ do\n let loop = do tok <- alexMonadScan\n if tok == TokenEOF\n then return []\n else do toks <- loop\n return $ tok : toks\n loop\n\nrunLexer s =\n case tokens s of\n (Right tokes) -> tokes\n (Left err) -> error err\n\n}\n\n ")
main :: IO ()
main = do {
args <- getArgs;
......@@ -236,5 +357,5 @@ main = do {
then openFile (head args) ReadMode
else (error "No file given");
contents <- hGetContents astF;
putStrLn (parseAstDecs contents);
parseAstDecs contents (head args);
}
-- Alex lexer for the Imp example language (companion to the grammar
-- above).  Keyword rules are listed before the generic identifier rule
-- so they win on ties of equal length.
{
module Lexer where
}
%wrapper "basic"
$digit = 0-9
$alpha = [a-z]
$Alpha = [A-Z]
tokens :-
$white+ ;
set { \s -> TokenSet }
if { \s -> TokenIf }
while { \s -> TokenWhile }
begin { \s -> TokenBegin }
"new" { \s -> TokenBuiltin s }
"check-expect" { \s -> TokenOp s }
"-" { \s -> TokenOp s }
"/" { \s -> TokenOp s }
"*" { \s -> TokenOp s }
"+" { \s -> TokenOp s }
[\+\-\/\*] { \s -> TokenOp s}
[\(] { \s -> TokenLParen }
[\)] { \s -> TokenRParen }
$digit+ { \s -> TokenInt (read s) }
$alpha [$alpha $Alpha \_]* { \s -> TokenVar s}
. { \s -> TokenSym (head s)}
{
-- Token type shared with the generated parser.
data Token = TokenSet
  | TokenIf
  | TokenWhile
  | TokenBegin
  | TokenSym Char
  | TokenOp String
  | TokenBuiltin String
  | TokenPlus
  | TokenMult
  | TokenDiv
  | TokenMinus
  | TokenLParen
  | TokenRParen
  | TokenInt Int
  | TokenVar String
  deriving (Eq, Show)
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment