summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorOwen Jacobson <owen@grimoire.ca>2017-11-14 02:51:27 -0500
committerOwen Jacobson <owen@grimoire.ca>2017-11-14 02:51:27 -0500
commit6ab0fb837e5b1cc40002037e2ed15505f99cbbe3 (patch)
treed7fbc91d82588af74b5f4a931c7ba3344f63ab41 /tests
parent10dad94b84ae0a41b1ff03d6d705732834958efb (diff)
Macro expander.
This includes a fairly complete quasiquote system, and a complete rework of the expander.
Diffstat (limited to 'tests')
-rw-r--r--tests/forms.py12
-rw-r--r--tests/test_evaluator.py3
-rw-r--r--tests/test_reader.py4
-rw-r--r--tests/tokens.py38
4 files changed, 37 insertions, 20 deletions
diff --git a/tests/forms.py b/tests/forms.py
index 1a49636..7ea216f 100644
--- a/tests/forms.py
+++ b/tests/forms.py
@@ -1,6 +1,7 @@
from hypothesis.strategies import integers, decimals as hypo_decimals, booleans, characters, text, tuples, lists as hypo_lists, just, one_of
from hypothesis.strategies import deferred, recursive
+from actinide import tokenizer as t
from actinide.symbol_table import *
from actinide.types import *
@@ -37,7 +38,7 @@ def strings():
# Generates any character legal in a symbol, which cannot be part of some other
# kind of atom.
def symbol_characters():
- return characters(blacklist_characters='01234567890#. \t\n();"')
+ return characters(blacklist_characters='01234567890#' + t.whitespace + t.parens + t.quotes + t.string_delim + t.comment_delim)
# Generates symbols guaranteed not to conflict with other kinds of literal. This
# is a subset of the legal symbols.
@@ -45,6 +46,15 @@ def symbols():
return text(symbol_characters(), min_size=1)\
.map(lambda item: symbol_table[item])
+def quoted_forms():
+ return tuples(
+ one_of(
+ symbol_table[q]
+ for q in ['quote', 'quasiquote', 'unquote', 'unquote-splicing']
+ ),
+ deferred(lambda: forms),
+ ).map(lambda elems: list(*elems))
+
# Generates atoms.
def atoms():
return one_of(
diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py
index dbccbce..cb7c11c 100644
--- a/tests/test_evaluator.py
+++ b/tests/test_evaluator.py
@@ -13,6 +13,7 @@ from .programs import *
def test_evaluator(program_result):
program, result, bindings = program_result
environment = Environment()
- assert run(eval(program, symbol_table, None), environment) == result
+ macros = Environment()
+ assert run(eval(program, symbol_table, None), environment, macros) == result
for symbol, value in bindings:
assert environment[symbol] == value
diff --git a/tests/test_reader.py b/tests/test_reader.py
index 54a1681..b67c05e 100644
--- a/tests/test_reader.py
+++ b/tests/test_reader.py
@@ -12,7 +12,7 @@ from .forms import *
# * Given a form, can the reader recover it from its display?
@given(forms())
def test_reader(form):
- input = display(form)
+ input = display(form, symbol_table)
port = string_to_input_port(input)
assert read(port, symbol_table) == form
@@ -21,7 +21,7 @@ def test_reader(form):
# without touching the garbage? This is only reliable with lists and conses.
@given(lists() | conses(), text())
def test_reader_with_trailing(form, text):
- input = display(form) + text
+ input = display(form, symbol_table) + text
port = string_to_input_port(input)
assert read(port, symbol_table) == form
diff --git a/tests/tokens.py b/tests/tokens.py
index 3eb58b8..0e98494 100644
--- a/tests/tokens.py
+++ b/tests/tokens.py
@@ -1,23 +1,29 @@
from hypothesis.strategies import just, one_of, characters, text, lists, tuples
from hypothesis.strategies import composite, recursive
+from actinide import tokenizer as t
+
# Generators for token families
-# Generates the `(` token.
-def open_parens():
- return just('(')
+# Generates the `(` and `)` tokens.
+def parens():
+ return one_of(just(p) for p in t.parens)
-# Generates the ')' token.
-def close_parens():
- return just(')')
+def quotes():
+ return one_of(
+ just("'"),
+ just('`'),
+ just(','),
+ just(',@'),
+ )
# Generates characters that are legal, unescaped, inside of a string.
def string_bare_characters():
- return characters(blacklist_characters='\\"')
+ return characters(blacklist_characters=t.string_escaped)
# Generates legal string escape sequences.
def string_escaped_characters():
- return one_of(just('"'), just('\\')).map(lambda c: '\\' + c)
+ return one_of(just(c) for c in t.string_escaped).map(lambda c: '\\' + c)
# Generates single-character string representations, including escapes.
def string_characters():
@@ -34,7 +40,7 @@ def strings():
# Generates characters which are legal within a symbol.
def symbol_characters():
- return characters(blacklist_characters=' \t\n();"')
+ return characters(blacklist_characters=t.whitespace + t.parens + t.quotes + t.string_delim + t.comment_delim)
# Generates legal symbols.
def symbols():
@@ -42,11 +48,11 @@ def symbols():
# Generates single whitespace characters.
def whitespace_characters():
- return one_of(just('\n'), just(' '), just('\t'))
+ return one_of(just(c) for c in t.whitespace)
# Generates a single token.
def tokens():
- return one_of(symbols(), strings(), open_parens(), close_parens())
+ return one_of(symbols(), strings(), parens(), quotes())
# Generates a string which may not be empty, but which does not contain a token.
def nontokens():
@@ -78,16 +84,16 @@ def spaced_tokens():
return tuples(intertokens(), strategy)
def unspaced(strategy):
return tuples(one_of(just(''), intertokens()), strategy)
+ def spaced_quotes():
+ return spaced(quotes())
def spaced_symbols():
return spaced(symbols())
def spaced_strings():
return unspaced(strings())
- def spaced_open_parens():
- return unspaced(open_parens())
- def spaced_close_parens():
- return unspaced(close_parens())
+ def spaced_parens():
+ return unspaced(parens())
- return one_of(spaced_symbols(), spaced_strings(), spaced_open_parens(), spaced_close_parens())
+ return one_of(spaced_symbols(), spaced_quotes(), spaced_strings(), spaced_parens())
# Generates a list of pairs as per spaced_token().
def spaced_token_sequences():