commit d8072402b929b1ba16dd247d1493cfdfd26d09d6
parent 5f603bd951801baad051405ff51f703739fa2c6e
Author: Paul Longtine <paul@nanner.co>
Date: Tue, 19 Sep 2017 17:36:26 -0400
Finally resolved the issue with resolving nested objects. WOOOOOOOOOO!
Diffstat:
5 files changed, 80 insertions(+), 19 deletions(-)
diff --git a/src/lc/interpreter.py b/src/lc/interpreter.py
@@ -30,17 +30,25 @@ class Label(AbstractToken):
names = self.data.rsplit(".", 1)
+ # Determine whether this label is a property of an object (the name
+ # contains a ".") or a name at the root level of the namespace
if len(names) > 1:
self.is_property = True
self.parent = Label(self.i, names[0])
+ self.parent.update()
+
self.name = names[1]
- t = self.i.ns.resolve_with_obj(self.parent, self.name)
+ t = self.i.ns.resolve_with_obj(self.parent, names[1])
+
self.expr = t[0]
+
else:
self.name = names[0]
+
t = self.i.ns.resolve(self.name)
+
self.scope = t[0]
self.expr = t[1]
@@ -48,7 +56,6 @@ class Label(AbstractToken):
if s:
return(self.scope)
else:
- print(self.name, self.expr)
return(int_to_word(self.expr))
class Arguements(AbstractToken):
@@ -124,7 +131,7 @@ class Expression(AbstractToken):
[">=", Opcode(OP_GTHAN_EQ)],
["=<", Opcode(OP_LTHAN_EQ)]
]
-
+ # Pull the operator names out of the table above
self.operator_names = list(map(lambda x: x[0], self.operators))
self.func_call = Statement(
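A minimal standalone sketch of the dotted-label resolution the Label hunk above implements, assuming the namespace can be modelled as nested dicts; resolve_label and the ns layout here are hypothetical stand-ins for the project's Label and Namespace classes, not the actual API.

# Hypothetical model of how a dotted label such as "y.x.property_three"
# resolves: split off the last component, resolve the parent recursively,
# then look the final name up inside the parent object.
def resolve_label(ns, data):
    names = data.rsplit(".", 1)
    if len(names) > 1:
        parent = resolve_label(ns, names[0])  # resolve "y.x" first
        return parent[names[1]]               # then "property_three" inside it
    return ns[names[0]]                       # root-level name

ns = {"y": {"property_two": 2, "x": {"property_three": 3}}}
assert resolve_label(ns, "y.x.property_three") == 3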
diff --git a/src/lc/lexer.py b/src/lc/lexer.py
@@ -131,6 +131,7 @@ class Tokenizer():
def __init__(self, symbol_delim, statement_delim):
self.symbol_delim = symbol_delim
self.statement_delim = statement_delim
+
self.symbols = []
# Based on self.symbol_delim and string literals, break the code into symbols
@@ -174,9 +175,12 @@ class Tokenizer():
tmp.append(x)
self.symbols = tmp
- def generate_statements(self):
+ def generate_statements(self, raw):
rv = []
tmp = []
+
+ self.generate_symbols(raw)
+
for i in self.symbols:
t = i.strip()
if len(t) > 0:
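A toy sketch (not the project's Tokenizer) of the API reshaping this hunk makes: generate_statements() now takes the raw source and runs the symbol pass itself, so a caller makes one call instead of two. The splitting rules below are placeholder assumptions.

class ToyTokenizer:
    def __init__(self):
        self.symbols = []

    def generate_symbols(self, raw):
        # Placeholder split; the real lexer also honours symbol_delim and
        # string literals.
        self.symbols = raw.replace(";", " ; ").split()

    def generate_statements(self, raw):
        self.generate_symbols(raw)  # formerly the caller's responsibility
        rv, tmp = [], []
        for s in self.symbols:
            if s == ";":
                rv.append(tmp)
                tmp = []
            else:
                tmp.append(s)
        return rv

print(ToyTokenizer().generate_statements("int a = 1; print a;"))
# [['int', 'a', '=', '1'], ['print', 'a']]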
diff --git a/src/lc/namespace.py b/src/lc/namespace.py
@@ -72,11 +72,19 @@ class Namespace():
def resolve_with_obj(self, parent, name):
rv = None
- obj = self.obj_resolve(parent.name)
- print(parent.name, name)
- if name in obj:
- rv = obj[name]
+ if parent.is_property == True:
+ obj = self.resolve_with_obj(parent.parent, parent.name)
+ else:
+ obj = self.obj_resolve(parent.name)
+
+ if type(obj) == dict:
+ if name in obj:
+ rv = obj[name]
+ elif type(obj) == list:
+ if name in obj[1]:
+ rv = obj[1][name]
+
return rv
# Resolves name into scope and address
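A hedged sketch of the member lookup at the end of resolve_with_obj, assuming (from the obj[1][name] access above) that the list form is a pair whose second element carries the member table:

def lookup_member(obj, name):
    # An object entry may be a plain dict of members, or (assumed here from
    # the diff) a pair whose second element is the member dict.
    rv = None
    if isinstance(obj, dict):
        if name in obj:
            rv = obj[name]
    elif isinstance(obj, list):
        if name in obj[1]:
            rv = obj[1][name]
    return rv

assert lookup_member({"property_one": 1}, "property_one") == 1
assert lookup_member(["scope", {"property_two": 2}], "property_two") == 2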
diff --git a/src/lc/parser.py b/src/lc/parser.py
@@ -55,21 +55,23 @@ class Parser():
"hashtable",
"stack"
]
-
+ # Defines what integers look like
self.int_def = AtomicSymbol("^[0-9]+$")
+ # Regular expression for string literals (text wrapped in `"`)
self.str_def = AtomicSymbol("^\0+")
-
+ # Defines what type names exist
self.type_def = InclusiveSymbol(self.defined_types)
+ # Defines what labels look like: anything that is not a reserved name
self.label_def = ExclusiveSymbol(self.defined_types +
[self.int_def] +
[self.str_def] +
self.known_tokens )
-
+ # Defines what a parameter list looks like
self.paramlist_def = GroupingSymbol( [
AtomicSymbol("\("),
AtomicSymbol("\)")
] )
-
+ # Defines what an expression looks like
self.expr_def = PolySymbol( [
self.label_def,
self.int_def,
@@ -392,30 +394,46 @@ class Parser():
self.statement_assign,
self.statement_expression
]
- data=""
- with open(file_name, 'r') as program:
- data=program.read().replace('\n', '')
-
+
+ # This defines what counts as a symbol and what ends a statement
self.symbols = Tokenizer(self.splitters, self.end_statements)
- self.symbols.generate_symbols(data)
+ # This holds the program.
+ data = ""
+ # Open the file and strip out every newline.
+ with open(file_name, 'r') as program:
+ data=program.read().replace('\n', '')
- self.lines = self.symbols.generate_statements()
+ # Now, parse our program into statements
+ self.lines = self.symbols.generate_statements(data)
def get_statements(self):
rv = []
+ # Go through our program statement by statement and get line numbers
for num, l in enumerate(self.lines):
+ # Now, for each active token we have defined, step through and find
+ # which lines match which tokens
+ #
+ # NOTE: active_tokens is ordered from most likely to match
+ # to least likely to match
for a in self.active_tokens:
r = a.match(l)
+ # If the line matches the token,
if r:
+ # If the token is an "include" token, include the file
+ # specified by the "include" directive
if a.name == "include":
+ # Create a new Parser instance pointing to the file
+ # specified by the first argument
t = Parser(r[1][0] + ".ti")
l = t.get_statements()
rv.extend(l)
else:
+ # We are a normal token; record the token type
+ # along with the list of matching tokens
rv.append([a,r,[]])
print("{}: {}\t{}".format(str(num).rjust(4),
- a.name.rjust(15),r))
+ a.name.rjust(15), r))
break
return rv
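A small sketch of the include expansion in get_statements() above, with the filesystem faked by a dict; the file names and the line format here are illustrative only.

FAKE_FILES = {
    "main.ti": ["include util", "print property_one"],
    "util.ti": ["int property_one = 1"],
}

def get_statements(file_name):
    rv = []
    for line in FAKE_FILES[file_name]:
        if line.startswith("include "):
            # Recurse into the included file and splice its statements in,
            # mirroring Parser(r[1][0] + ".ti").get_statements() above.
            rv.extend(get_statements(line.split(" ", 1)[1] + ".ti"))
        else:
            rv.append(line)
    return rv

assert get_statements("main.ti") == ["int property_one = 1", "print property_one"]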
diff --git a/src/lc/test_files/problem.ti b/src/lc/test_files/problem.ti
@@ -0,0 +1,24 @@
+class UsedByUsedByTesting:
+{
+ int property_three = 3;
+}
+
+class UsedByTesting:
+{
+ int property_two = 2;
+
+ UsedByUsedByTesting x = new UsedByUsedByTesting();
+}
+
+class Testing:
+{
+ int property_one = 1;
+
+ UsedByTesting y = new UsedByTesting();
+
+ print y.x.property_three;
+ print y.property_two;
+ print property_one;
+}
+
+Testing t = new Testing();