-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathFleaParser.py
58 lines (48 loc) · 1.41 KB
/
FleaParser.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# TODO: build a parser
from FleaLexer import tokenize, op_keywords
# Smoke-test the lexer: tokenize one sample /edit command and dump the
# resulting (token, text) pairs so the token stream can be eyeballed.
tokens = tokenize(text='/edit 4 to:"5fdsafdafda fdsafdas"', prefix='/')
print(tokens)
"""
TODO:
RULES:
each function has it's own order of tokens
1. /edit [number] [newvalue][colon][number] or [substring]
2. /add [substring] [number] [bareword]
3. /remove [number] [bareword]
4. /display [bareword] or [all]
5. /prioritize [number] [number]
6. /move [number] [bareword] [number] [bareword]
7. /save [bareword] or [all]
8. /exit
9. /help
"""
# One line per token: kind first, then the matched text.
for kind, text in tokens:
    print(kind, text)
def checkOrder(tokenlist) -> bool:
    """Return True when the token at index 1 is a recognized op keyword.

    NOTE(review): assumes ``tokenlist[1]`` holds the command keyword that
    follows the '/' prefix, in whatever form ``op_keywords`` stores its
    members — confirm against FleaLexer.

    :param tokenlist: token sequence produced by ``tokenize``.
    :return: True if the command keyword is known, False otherwise.
    """
    # Fixes two defects in the original: a dangling `comparison =`
    # (SyntaxError) and an implicit `None` return on the success path,
    # which contradicted the `-> bool` annotation.
    return tokenlist[1] in op_keywords
def parse(tokenlist):
"""
TODO:
The parser will first check if the tokens are in the right order for each rule
Then, it will generate a list of the text of the values
Then, every value in this list will be converted to it's appropriate datatype depending on what kind of token it is,
Then, the list will be passed on as a tuple to unpack in the util file into the approprate funtion.
"""
match tokenlist[1]:
case "edit":
pass
case "edit":
pass
case "edit":
pass
case "edit":
pass
case "edit":
pass
case "edit":
pass
case "edit":
pass
pass