Commit 85394105 authored by matthew-eads

small bug fixes and cleanup

parent 1557ab76
*.swp
*.bak
ImpParser.*
TigerParser.*
ImpLexer.*
TigerLexer.*
dist
cabal-dev
*.o
@@ -8,18 +8,18 @@ data AST = Expression Exp
 data Exp = SET ID Exp
          | IF SimpleExp Exp (Maybe Exp) --> if
-         | WHILE SimpleExp Exp
-         | BEGIN (List1 Exp)
+         | WHILE SimpleExp Exp --> while
+         | BEGIN (List1 Exp)
          | LET [(String, Exp)] Exp
-data BinOp = Plus SimpleExp SimpleExp
-           | Mult SimpleExp SimpleExp
-           | Div SimpleExp SimpleExp
-           | Minus SimpleExp SimpleExp
+data BinOp = Plus SimpleExp SimpleExp --> infix "+"
+           | Mult SimpleExp SimpleExp --> infix "*"
+           | Div SimpleExp SimpleExp --> infix "/"
+           | Minus SimpleExp SimpleExp --> infix "-"
            | CheckExpect SimpleExp SimpleExp
 data SimpleExp =
-    Lit Int
+     Lit Int
          | Var ID
          | Apply ID (List0 Exp)
          | BinOp BinOp
@@ -13,5 +13,15 @@ as input. Running `stack exec gg-proto file.ast` will run the
 grammar generator with the AST specified in the given file.
 The file needs to be a valid Haskell file describing the
 AST of your program; `src/Imp.ast` is given as an example.
-The program currently will print out a portion of a yacc-like
-grammar for the AST, devoid of any syntactic niceties.
+The AST can contain a number of optional grammar directives.
+Directives have the form `--> directivename` and are placed
+after a constructor in a data declaration. For example:
+`data Exp = IF Exp Exp Exp --> if | WHILE Exp Exp --> while`
+These are used to generate helpful grammar rules for commonly
+used constructions.
+The program now writes two files: with `Lang.ast` as the
+input AST, it writes `LangParser.y` and `LangLexer.x`, which
+contain a valid happy grammar and a valid alex lexer,
+respectively.
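
The generated files are ordinary happy and alex inputs, so (assuming standard installations of both tools) they can be compiled in the usual way; the module names follow the input AST's name, as in the `Lang.ast` example above:

    happy LangParser.y -o LangParser.hs
    alex LangLexer.x -o LangLexer.hs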
@@ -5,7 +5,7 @@ description: Please see README.md
 homepage: https://github.cs.tufts.edu/siriusly/gg-proto
 author: Matthew Eads
 build-type: Simple
-extra-source-files: AST.hs
+extra-source-files: AST.hs, FileStrings.hs
 cabal-version: >=1.10
 executable gg-proto
@@ -16,6 +16,7 @@ executable gg-proto
                haskell-src, template-haskell, regex-compat, containers,
                Unique, split
 default-language: Haskell2010
+other-modules: FileStrings
 source-repository head
   type: git
@@ -17,6 +17,7 @@ import Data.List.Unique
 import Data.List
 import Data.List.Split
 import Data.Char
+import FileStrings (makeParser, makeLexer)
 type Directive = String
 type Grammar = String
@@ -68,12 +69,12 @@ stypeToString :: StrictType -> (String, String)
 stypeToString (_, (ConT n)) = (showName n, "")
 stypeToString (_, (AppT ListT t)) =
     let (s, rs) = (stypeToString (IsStrict, t))
-    in (s ++ "*", (rs ++ "\n\n" ++ makeMultiRules (AppT (ConT list0) t) s))
+    in ("Multi0" ++ s, (rs ++ "\n\n" ++ makeMultiRules (AppT (ConT list0) t) s))
 stypeToString (_, (AppT (ConT n) t)) =
     if n == list0 || n == list1 then
         let (s, rs) = (stypeToString (IsStrict, t))
-            c = if n == list0 then "*" else "+"
-        in (s ++ c, (rs ++ "\n\n" ++ makeMultiRules (AppT (ConT n) t) s))
+            c = if n == list0 then "Multi0" else "Multi1"
+        in (c ++ s, (rs ++ "\n\n" ++ makeMultiRules (AppT (ConT n) t) s))
     else if n == (mkName "Maybe") then
         let (s, rs) = (stypeToString (IsStrict, t))
         in (("Maybe" ++ s),
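In effect (an illustration, not part of the diff), fields with list types now get alphanumeric nonterminal names, since `*` and `+` are not usable inside happy rule names:

    -- stypeToString on a field of type (List0 Exp), sketched:
    --   before: ("Exp*",      listRules)
    --   after:  ("Multi0Exp", listRules)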
@@ -179,6 +180,11 @@ rhsToRule rules =
         else error "bad number of constructions for if rule"
     "while" -> ("\"while\" \"(\"" ++ (head ts) ++ " \")\" \"{\" " ++ (head (tail ts))
                ++ " \"}\"" ++ (rrhsL s [3, 6]), [])
+    ('i':'n':'f':'i':'x':rest) ->
+        let op = cleanWS rest
+            (op', t) = if op == "" then (" \"" ++ s ++ "\" ", [s]) else (" "++op++" ", [])
+        in ((head ts) ++ op' ++ (head (tail ts)) ++ (rrhsL s [1,3]), t)
     bad -> error ("directive " ++ bad ++ " not supported"))
 strip :: String -> String
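For context, an illustration (not part of the diff) of the rule shapes these cases emit, with `rrhsL` supplying the bracketed semantic action; exact spacing may differ:

    -- while:      "while" "(" SimpleExp ")" "{" Exp "}"   { WHILE $3 $6 }
    -- infix "+":  SimpleExp "+" SimpleExp                 { Plus $1 $3 }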
@@ -262,7 +268,7 @@ cleanMulti s =
 addDirectiveToRHS :: String -> [RHS] -> [RHS']
 addDirectiveToRHS s ((RHS n cs):rs) =
     let (rhs, rest) = case matchRegexAll (mkRegex "([^|]|[\n\r])*") s of
-            Just (_, match, after, _) -> (trace {-("\nfound rhs: " ++ match ++ "\n and rest: " ++ after)-} "" (match, after))
+            Just (_, match, after, _) -> (match, after)
             Nothing -> (trace ("\nno match in finding rhs: " ++ s) (s, ""))
         directive = case matchRegexAll (mkRegex "-- >.*") rhs of
             Nothing -> ""
@@ -322,33 +328,6 @@ parseAstDecs s name =
        lexer = makeLexer name' (nub tokes)
    in do {writeFile (name' ++ "Parser.y") parser; writeFile (name' ++ "Lexer.x") lexer;
           return ();}
--- in (foldr (\(x, y) acc -> y ++ acc) "" decTs) ++ rules
-makeParser :: String -> [String] -> String -> String -> String
-makeParser name tokes grammar ast =
-    let tokenRules = cleanNLs $ foldr (\t a -> "\"" ++ t ++ "\""
-                     ++ " { " ++ "Token" ++ ((toUpper (head t)):(tail t)) ++
-                     " }\n" ++ a) "" tokes
-    in
-    "{\nmodule " ++ name ++ "Parser where\nimport " ++ name ++ "Lexer\n}\n%name " ++ (map toLower name) ++
-    "\n%tokentype { Token }\n%token\n" ++ tokenRules ++ "Int { TokenInt $$ }\nID { TokenID $$ }\nset { TokenSet }\nif { TokenIf}\nplus { TokenPlus}\nwhile { TokenWhile }\nbegin { TokenBegin }\nString { TokenString $$ }\neof { TokenEOF }\nnl { TokenNL }\n%error {parseError}\n\n%%\n\n" ++ grammar ++
-    "\n{\n" ++ ast ++ "\nparseError :: [Token] -> a\nparseError t = error (\"Parse Error on token(s) \" ++ (show t))\n\ntype ID = String\n}\n"
-makeLexer :: String -> [String] -> String
-makeLexer name tokes =
-    let tokenRules = cleanNLs $ foldr (\t a ->
-                     "<0> " ++ '"':t ++ "\"\t{ \\(pos,_,_,inp) len -> return Token"
-                     ++ ((toUpper (head t)):(tail t)) ++ " :: Alex Token }\n" ++ a) "" tokes
-        dataRules = cleanNLs $ foldr (\t a -> "\t| Token" ++ ((toUpper (head t)):(tail t))
-                     ++ "\n" ++ a) "" tokes
-    in ("{\nmodule " ++ name ++ "Lexer (runLexer, Token (..)) where\nimport Debug.Trace\n}\n\n%wrapper \"monad\"\n\n$spaces = [\\ \\t]\n$alpha = [a-zA-Z]\n$digits = [0-9]\n$alnum = [$alpha$digits]\n$alnumplus = [$alnum\\_\\-]\n$nl = [\\n\\r]\n\n@identifier = $alpha $alnumplus*\n@comment = \\-\\-.*\n@integer = $digits+\n@string = \\\"[^\\\"]*\\\"\n\n\n:-\n\n<0> @integer { \\(pos,_,_,inp) len -> return $ TokenInt (read (take len inp)) }\n<0> @string { \\(pos,_,_,inp) len -> return $ TokenString (take len inp) }\n\n<0> @identifier { \\(pos,_,_,inp) len -> return $ TokenID (take len inp) }\n\n"
-    ++ tokenRules ++
-    "<0> \"=\" { \\(pos,_,_,inp) len -> return $ TokenEq }\n<0> \"+\" { \\(pos,_,_,inp) len -> return $ TokenPlus }\n<0> \"-\" { \\(pos,_,_,inp) len -> return $ TokenDash }\n<0> \"_\" { \\(pos,_,_,inp) len -> return $ TokenUnderscore }\n<0> \"\\[\" { \\(pos,_,_,inp) len -> return $ TokenLBrack }\n<0> \"\\]\" { \\(pos,_,_,inp) len -> return $ TokenRBrack }\n<0> \";\" { \\(pos,_,_,inp) len -> return $ TokenSemiColon }\n<0> \">\" { \\(pos,_,_,inp) len -> return $ TokenGT }\n<0> \"(\" { \\(pos,_,_,inp) len -> return $ TokenLParen }\n<0> \")\" { \\(pos,_,_,inp) len -> return $ TokenRParen }\n<0> \"\n\" { \\(pos,_,_,inp) len -> return $ TokenNL }\n<0> set { \\(pos,_,_,inp) len -> return $ TokenSet :: Alex Token}\n<0> if { \\(pos,_,_,inp) len -> return $ TokenIf :: Alex Token }\n<0> then { \\(pos,_,_,inp) len -> return $ TokenThen :: Alex Token }\n<0> else { \\(pos,_,_,inp) len -> return $ TokenElse :: Alex Token }\n<0> while { \\(pos,_,_,inp) len -> return $ TokenWhile :: Alex Token }\n<0> \"<\" { \\(pos,_,_,inp) len -> return $ TokenLT :: Alex Token }\n<0> \"<=\" { \\(pos,_,_,inp) len -> return $ TokenLE :: Alex Token }\n<0> \">=\" { \\(pos,_,_,inp) len -> return $ TokenGE :: Alex Token }\n<0> \"!\" { \\(pos,_,_,inp) len -> return $ TokenBang :: Alex Token }\n<0> \"@\" { \\(pos,_,_,inp) len -> return $ TokenAt :: Alex Token }\n<0> \"#\" { \\(pos,_,_,inp) len -> return $ TokenPound :: Alex Token }\n<0> \"$\" { \\(pos,_,_,inp) len -> return $ TokenDollar :: Alex Token }\n<0> \"%\" { \\(pos,_,_,inp) len -> return $ TokenPercent :: Alex Token }\n<0> \"^\" { \\(pos,_,_,inp) len -> return $ TokenCarat :: Alex Token }\n<0> \"&\" { \\(pos,_,_,inp) len -> return $ TokenAmpersand :: Alex Token }\n<0> \"*\" { \\(pos,_,_,inp) len -> return $ TokenStar :: Alex Token }\n<0> \"{\" { \\(pos,_,_,inp) len -> return $ TokenLBrace :: Alex Token }\n<0> \"}\" { \\(pos,_,_,inp) len -> return $ TokenRBrace :: Alex Token }\n<0> \"(\" { \\(pos,_,_,inp) len -> return $ TokenLParen :: Alex Token }\n<0> \")\" { \\(pos,_,_,inp) len -> return $ TokenRParen :: Alex Token }\n<0> \"?\" { \\(pos,_,_,inp) len -> return $ TokenQuestion :: Alex Token }\n<0> \"/\" { \\(pos,_,_,inp) len -> return $ TokenFSlash :: Alex Token }\n<0> \"\\\" { \\(pos,_,_,inp) len -> return $ TokenBSlash :: Alex Token }\n<0> \":\" { \\(pos,_,_,inp) len -> return $ TokenColon :: Alex Token }\n<0> \"\\\"\" { \\(pos,_,_,inp) len -> return $ TokenDoubleQuote :: Alex Token }\n<0> \"'\" { \\(pos,_,_,inp) len -> return $ TokenSingleQuote :: Alex Token }\n<0> \",\" { \\(pos,_,_,inp) len -> return $ TokenComma :: Alex Token }\n<0> \".\" { \\(pos,_,_,inp) len -> return $ TokenPeriod :: Alex Token }\n<0> \"?\" { \\(pos,_,_,inp) len -> return $ TokenQuestion :: Alex Token }\n<0> \"~\" { \\(pos,_,_,inp) len -> return $ TokenTilda :: Alex Token }\n<0> \"`\" { \\(pos,_,_,inp) len -> return $ TokenTick :: Alex Token }\n<0> \"|\" { \\(pos,_,_,inp) len -> return $ TokenBar :: Alex Token }\n\n\n<0> @comment ;\n<0> [\\ \\t \\n]+ ;\n<0> \"/*\" { begin comment }\n<comment> \"*/\" { begin 0 }\n<comment> . ;\n<0> $white ;\n. { \\(pos,_,_,inp) len -> error \"bad token\" }\n\n{ \n\n\n\n\nalexEOF :: Alex Token\nalexEOF = return TokenEOF\n\nlexwrap = (alexMonadScan >>=)\n\ndata Token = TokenID String \n | TokenSet \n | TokenIf\n | TokenThen\n | TokenElse\n | TokenWhile\n | TokenBegin\n | TokenInt Int\n | TokenVar\n | TokenString String \n | TokenEOF \n | TokenNL \n | TokenLParen\n | TokenRParen\n | TokenLT\n | TokenGT\n | TokenLE\n | TokenGE\n | TokenBang\n | TokenAt\n | TokenPound\n | TokenDollar\n | TokenPercent\n | TokenCarat\n | TokenAmpersand\n | TokenStar\n | TokenDash\n | TokenUnderscore\n | TokenPlus\n | TokenEq\n | TokenLBrace\n | TokenRBrace\n | TokenLBrack\n | TokenRBrack\n | TokenFSlash\n | TokenBSlash\n | TokenSemiColon\n | TokenColon\n | TokenDoubleQuote\n | TokenSingleQuote\n | TokenComma\n | TokenPeriod\n | TokenQuestion\n | TokenTilda\n | TokenTick\n | TokenBar\n "
-    ++ dataRules ++
-    " deriving (Eq, Show)\n\ntokens str = runAlex str $ do\n let loop = do tok <- alexMonadScan\n if tok == TokenEOF\n then return []\n else do toks <- loop\n return $ tok : toks\n loop\n\nrunLexer s =\n case tokens s of\n (Right tokes) -> tokes\n (Left err) -> error err\n\n}\n\n ")
 main :: IO ()
 main = do {
module FileStrings where

import Data.Char

-- Collapse every run of three or more newlines down to exactly two,
-- i.e. allow at most one blank line in a row.
cleanNLs' :: String -> String
cleanNLs' ('\n':'\n':'\n':s) = cleanNLs' ('\n':'\n':s)
cleanNLs' (x:s) = x:(cleanNLs' s)
cleanNLs' [] = ""

-- Drop leading newlines, then collapse blank-line runs.
cleanNLs :: String -> String
cleanNLs s = cleanNLs' (dropWhile ((==) '\n') s)
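
-- A quick check of the intended behavior (hypothetical GHCi session,
-- not part of the commit):
--   ghci> cleanNLs "\n\nfoo\n\n\n\nbar\n"
--   "foo\n\nbar\n"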

-- Emit a complete happy parser module: header block, %name and
-- %tokentype declarations, the %token table, the grammar rules, and a
-- trailer defining parseError and the ID type.
makeParser :: String -> [String] -> String -> String -> String
makeParser name tokes grammar ast =
let tokenRules = cleanNLs $ foldr (\t a -> "\"" ++ t ++ "\""
++ " { " ++ "Token" ++ ((toUpper (head t)):(tail t)) ++
" }\n" ++ a) "" tokes
in
"{\nmodule " ++ name ++ "Parser where" ++
"\nimport " ++ name ++ "Lexer\n}" ++
"\n%name " ++ (map toLower name) ++
"\n%tokentype { Token }\n" ++
"%token\n" ++
    tokenRules ++
    "Int { TokenInt $$ }\n" ++
    "ID { TokenID $$ }\n" ++
    "set { TokenSet }\n" ++
    "if { TokenIf }\n" ++
    "plus { TokenPlus }\n" ++
"while { TokenWhile }\n" ++
"begin { TokenBegin }\n" ++
"String { TokenString $$ }\n" ++
"eof { TokenEOF }\n" ++
"nl { TokenNL }\n" ++
"%error {parseError}\n\n%%\n\n" ++
grammar ++
"\n{\n" ++ ast ++
"\nparseError :: [Token] -> a\n" ++
"parseError t = error (\"Parse Error on token(s) \" ++ (show t))\n\n" ++
"type ID = String\n}\n"

-- Emit a complete alex lexer module: the "monad" wrapper header,
-- character-class macros, one rule per token string, and the Token
-- datatype with a small runLexer driver.
makeLexer :: String -> [String] -> String
makeLexer name tokes =
let tokenRules = cleanNLs $ foldr (\t a ->
"<0> " ++ '"':t ++ "\"\t{ \\(pos,_,_,inp) len -> return Token"
++ ((toUpper (head t)):(tail t)) ++ " :: Alex Token }\n" ++ a) "" tokes
dataRules = cleanNLs $ foldr (\t a -> "\t| Token" ++ ((toUpper (head t)):(tail t))
++ "\n" ++ a) "" tokes
in ("{\nmodule " ++ name ++ "Lexer (runLexer, Token (..)) where\n" ++
"import Debug.Trace\n}\n\n" ++
"%wrapper \"monad\"\n\n" ++
"$spaces = [\\ \\t]\n" ++
"$alpha = [a-zA-Z]\n" ++
"$digits = [0-9]\n" ++
"$alnum = [$alpha$digits]\n" ++
"$alnumplus = [$alnum\\_\\-]\n" ++
"$nl = [\\n\\r]\n\n" ++
"@identifier = $alpha $alnumplus*\n" ++
"@comment = \\-\\-.*\n" ++
"@integer = $digits+\n" ++
"@string = \\\"[^\\\"]*\\\"\n\n\n" ++
":-\n\n" ++
"<0> @integer { \\(pos,_,_,inp) len -> return $ TokenInt (read (take len inp)) }\n" ++
"<0> @string { \\(pos,_,_,inp) len -> return $ TokenString (take len inp) }\n\n" ++
"<0> @identifier { \\(pos,_,_,inp) len -> return $ TokenID (take len inp) }\n\n"
    ++ tokenRules ++
    "<0> \"=\"\t { \\(pos,_,_,inp) len -> return $ TokenEq }\n" ++
    "<0> \"+\" { \\(pos,_,_,inp) len -> return $ TokenPlus }\n" ++
    "<0> \"-\" { \\(pos,_,_,inp) len -> return $ TokenDash }\n" ++
    "<0> \"_\" { \\(pos,_,_,inp) len -> return $ TokenUnderscore }\n" ++
    "<0> \"\\[\" { \\(pos,_,_,inp) len -> return $ TokenLBrack }\n" ++
    "<0> \"\\]\" { \\(pos,_,_,inp) len -> return $ TokenRBrack }\n" ++
    "<0> \";\" { \\(pos,_,_,inp) len -> return $ TokenSemiColon }\n" ++
    "<0> \">\" { \\(pos,_,_,inp) len -> return $ TokenGT }\n" ++
    "<0> \"(\" { \\(pos,_,_,inp) len -> return $ TokenLParen }\n" ++
    "<0> \")\" { \\(pos,_,_,inp) len -> return $ TokenRParen }\n" ++
    -- emits "\n" in the generated file, which alex reads as a newline
    "<0> \"\\n\" { \\(pos,_,_,inp) len -> return $ TokenNL }\n" ++
    "<0> set { \\(pos,_,_,inp) len -> return $ TokenSet :: Alex Token}\n" ++
    "<0> if { \\(pos,_,_,inp) len -> return $ TokenIf :: Alex Token }\n" ++
    "<0> then { \\(pos,_,_,inp) len -> return $ TokenThen :: Alex Token }\n" ++
    "<0> else { \\(pos,_,_,inp) len -> return $ TokenElse :: Alex Token }\n" ++
    "<0> while { \\(pos,_,_,inp) len -> return $ TokenWhile :: Alex Token }\n" ++
    "<0> \"<\" { \\(pos,_,_,inp) len -> return $ TokenLT :: Alex Token }\n" ++
    "<0> \"<=\" { \\(pos,_,_,inp) len -> return $ TokenLE :: Alex Token }\n" ++
    "<0> \">=\" { \\(pos,_,_,inp) len -> return $ TokenGE :: Alex Token }\n" ++
    "<0> \"!\" { \\(pos,_,_,inp) len -> return $ TokenBang :: Alex Token }\n" ++
    "<0> \"@\" { \\(pos,_,_,inp) len -> return $ TokenAt :: Alex Token }\n" ++
    "<0> \"#\" { \\(pos,_,_,inp) len -> return $ TokenPound :: Alex Token }\n" ++
    "<0> \"$\" { \\(pos,_,_,inp) len -> return $ TokenDollar :: Alex Token }\n" ++
    "<0> \"%\" { \\(pos,_,_,inp) len -> return $ TokenPercent :: Alex Token }\n" ++
    "<0> \"^\" { \\(pos,_,_,inp) len -> return $ TokenCarat :: Alex Token }\n" ++
    "<0> \"&\" { \\(pos,_,_,inp) len -> return $ TokenAmpersand :: Alex Token }\n" ++
    "<0> \"*\" { \\(pos,_,_,inp) len -> return $ TokenStar :: Alex Token }\n" ++
    "<0> \"{\" { \\(pos,_,_,inp) len -> return $ TokenLBrace :: Alex Token }\n" ++
    "<0> \"}\" { \\(pos,_,_,inp) len -> return $ TokenRBrace :: Alex Token }\n" ++
    "<0> \"?\" { \\(pos,_,_,inp) len -> return $ TokenQuestion :: Alex Token }\n" ++
    "<0> \"/\" { \\(pos,_,_,inp) len -> return $ TokenFSlash :: Alex Token }\n" ++
    -- emits "\\" in the generated file so the alex string stays well-formed
    "<0> \"\\\\\" { \\(pos,_,_,inp) len -> return $ TokenBSlash :: Alex Token }\n" ++
    "<0> \":\" { \\(pos,_,_,inp) len -> return $ TokenColon :: Alex Token }\n" ++
    "<0> \"\\\"\" { \\(pos,_,_,inp) len -> return $ TokenDoubleQuote :: Alex Token }\n" ++
    "<0> \"'\" { \\(pos,_,_,inp) len -> return $ TokenSingleQuote :: Alex Token }\n" ++
    "<0> \",\" { \\(pos,_,_,inp) len -> return $ TokenComma :: Alex Token }\n" ++
    "<0> \".\" { \\(pos,_,_,inp) len -> return $ TokenPeriod :: Alex Token }\n" ++
    "<0> \"~\" { \\(pos,_,_,inp) len -> return $ TokenTilda :: Alex Token }\n" ++
    "<0> \"`\" { \\(pos,_,_,inp) len -> return $ TokenTick :: Alex Token }\n" ++
    "<0> \"|\" { \\(pos,_,_,inp) len -> return $ TokenBar :: Alex Token }\n\n\n" ++
"<0> @comment ;\n" ++
"<0> [\\ \\t \\n]+ ;\n" ++
"<0> \"/*\" { begin comment }\n" ++
"<comment> \"*/\" { begin 0 }\n" ++
"<comment> . ;\n" ++
"<0> $white ;\n" ++
". { \\(pos,_,_,inp) len -> error \"bad token\" }\n\n" ++
"{ \n\n\n\n\n" ++
"alexEOF :: Alex Token\n" ++
"alexEOF = return TokenEOF\n\n" ++
"lexwrap = (alexMonadScan >>=)\n\n" ++
"data Token = TokenID String \n" ++
" | TokenSet \n" ++
" | TokenIf\n" ++
" | TokenThen\n" ++
" | TokenElse\n" ++
" | TokenWhile\n" ++
" | TokenBegin\n" ++
" | TokenInt Int\n" ++
" | TokenVar\n" ++
" | TokenString String \n" ++
" | TokenEOF \n" ++
" | TokenNL \n" ++
" | TokenLParen\n" ++
" | TokenRParen\n" ++
" | TokenLT\n" ++
" | TokenGT\n" ++
" | TokenLE\n" ++
" | TokenGE\n" ++
" | TokenBang\n" ++
" | TokenAt\n" ++
" | TokenPound\n" ++
" | TokenDollar\n" ++
" | TokenPercent\n" ++
" | TokenCarat\n" ++
" | TokenAmpersand\n" ++
" | TokenStar\n" ++
" | TokenDash\n" ++
" | TokenUnderscore\n" ++
" | TokenPlus\n" ++
" | TokenEq\n" ++
" | TokenLBrace\n" ++
" | TokenRBrace\n" ++
" | TokenLBrack\n" ++
" | TokenRBrack\n" ++
" | TokenFSlash\n" ++
" | TokenBSlash\n" ++
" | TokenSemiColon\n" ++
" | TokenColon\n" ++
" | TokenDoubleQuote\n" ++
" | TokenSingleQuote\n" ++
" | TokenComma\n" ++
" | TokenPeriod\n" ++
" | TokenQuestion\n" ++
" | TokenTilda\n" ++
" | TokenTick\n" ++
" | TokenBar\n "
++ dataRules ++
" deriving (Eq, Show)\n\n" ++
"tokens str = runAlex str $ do\n" ++
" let loop = do tok <- alexMonadScan\n" ++
" if tok == TokenEOF\n" ++
" then return []\n" ++
" else do toks <- loop\n" ++
" return $ tok : toks\n" ++
" loop\n\n" ++
"runLexer s =\n" ++
" case tokens s of\n" ++
" (Right tokes) -> tokes\n" ++
" (Left err) -> error err\n\n" ++
"}\n\n ")
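For reference, a minimal sketch (not part of the commit) of how the two FileStrings generators are driven; it mirrors the `writeFile` calls in `parseAstDecs` above, with `name`, `tokes`, `grammar`, and `ast` standing in as hypothetical names for the values computed there:

    emitFiles :: String -> [String] -> String -> String -> IO ()
    emitFiles name tokes grammar ast = do
        -- write the happy grammar and the alex lexer side by side
        writeFile (name ++ "Parser.y") (makeParser name tokes grammar ast)
        writeFile (name ++ "Lexer.x") (makeLexer name tokes)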