-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathautomata.py
More file actions
159 lines (118 loc) · 5.41 KB
/
automata.py
File metadata and controls
159 lines (118 loc) · 5.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
from re import search, sub
from os import remove
from xml.etree import ElementTree
import logging
from common import reduces_to_true
from goal_utils import get_tmp_file_name, execute_goal_script, execute_translation, strip_unused_symbols
from python_ext import readfile
from siregex import to_regex, regex_to_proposition
from structs import Automaton, SpecType, PropertySpec
logger = logging.getLogger(__name__)
def automaton_from_gff(gff: str, complement: bool=False) -> Automaton:
    """Build a deterministic (Buchi) Automaton from a GFF description via GOAL.

    The GFF text is simplified and determinized (bk09) by a GOAL script,
    and the result is saved twice: once with a minimized acceptance set
    and once with a maximized one.  If the max-acceptance version shows
    that every state is either accepting or a non-accepting trap, the
    property is a safety automaton and that version is used; otherwise
    the min-acceptance version is used (liveness).

    :param gff: automaton description in GOAL's GFF (XML) format
    :param complement: if True, GOAL complements the automaton first
    :return: Automaton (field order as constructed below), with the
             safety flag set accordingly

    NOTE(review): assumes `execute_goal_script` runs GOAL synchronously,
    since the output files are read immediately after the call — confirm.
    """
    gff = strip_unused_symbols(gff)
    input_file_name = get_tmp_file_name()
    output_file_name = get_tmp_file_name()
    output_file_name2 = get_tmp_file_name()
    # try/finally so the temp files are not leaked when GOAL (or reading
    # its output) fails -- the original code only removed them on success
    try:
        with open(input_file_name, 'w') as f:
            f.write(gff)
        complement_stmnt = '' if not complement else '\n$res = complement $res;'
        goal_script = """
$res = load "{input_file_name}"; {complement_stmnt}
$res = simplify -m fair -dse -ds -rse -rs -ru -rd $res;
$res = determinization -m bk09 $res;
acc -min $res;
save $res {output_file_name};
acc -max $res;
save $res {output_file_name2};
""".format(complement_stmnt=complement_stmnt,
           input_file_name=input_file_name,
           output_file_name=output_file_name,
           output_file_name2=output_file_name2)
        execute_goal_script(goal_script)
        gff = readfile(output_file_name)
        gff2 = readfile(output_file_name2)
    finally:
        for tmp_file in (input_file_name, output_file_name, output_file_name2):
            try:
                remove(tmp_file)
            except OSError:
                pass  # output files may not exist if GOAL failed
    init_state2, states2, edges2, acc_states2 = gff_2_automaton_params(gff2)
    nacc_trap_states2 = get_nacc_trap_states(states2, acc_states2, edges2)
    if set(nacc_trap_states2).union(acc_states2) == set(states2):
        # every state is accepting or a dead trap => safety automaton
        automaton = Automaton(states2, init_state2, acc_states2, nacc_trap_states2, True, tuple(edges2.items()))
    else:
        # contains both dead states, and accepting states => liveness
        init_state, states, edges, acc_states = gff_2_automaton_params(gff)
        nacc_trap_states = get_nacc_trap_states(states, acc_states, edges)
        automaton = Automaton(states, init_state, acc_states, nacc_trap_states, False, tuple(edges.items()))
    logger.info('after all manipulations: %s', ['liveness', 'safety'][automaton.is_safety])
    return automaton
def automaton_from_spec(spec: PropertySpec) -> Automaton:
    """Build a 'positive deterministic Buchi' automaton from a property spec.

    Negated specs are complemented, so the result is always positive.
    (As an optimization the automaton has `fair` and `bad` states.)
    As the last step we do `determinize`, which completes the automaton.
    The states are of four types:
    - acc non-trap (aka `accepting`)
    - acc trap (aka `accepting trap`)
    - non-acc non-trap (aka `normal`)
    - non-acc trap (aka `dead`)
    NOTE we should never fall-out of the automaton.

    :param spec: property with `type` (one of SpecType's four kinds),
                 `data` (the spec text or file), and `is_positive`
    :raises KeyError: if spec.type is not one of the four handled kinds
    """
    logger.info('building automaton from spec "%s" of type %s', spec.desc, spec.type)
    logger.debug(spec)
    # TODO better spec parsing
    # dispatch: spec type -> function producing GFF text from spec.data
    get_gff = {SpecType.AUTOMATON_SPEC: readfile,
               SpecType.LTL_SPEC: ltl_2_automaton_gff,
               SpecType.PLTL_SPEC: pltl_2_automaton_gff,
               SpecType.ORE_SPEC: ore_2_automaton_gff}
    gff = get_gff[spec.type](spec.data)
    return automaton_from_gff(gff, not spec.is_positive)
def get_nacc_trap_states(states, acc_states, edges):
    """Return the non-accepting states that can never be left.

    A state counts as a non-accepting trap when it is not accepting and
    the labels on its self-loop reduce to `true` (presumably meaning the
    self-loop is taken under every letter — semantics live in
    `reduces_to_true`; confirm there).

    :param states: iterable of state identifiers
    :param acc_states: accepting states
    :param edges: dict mapping (src, dst) -> set of labels
    """
    trap_states = set()
    for state in set(states).difference(acc_states):
        self_loop_labels = edges.get((state, state))
        if not self_loop_labels:
            continue
        if reduces_to_true(self_loop_labels):
            trap_states.add(state)  # TODOopt: also replace edges with one trivial
    return trap_states
def gff_2_automaton_params(gff_xml: str):
    """Parse a GFF (XML) automaton description into its components.

    :param gff_xml: automaton in GOAL's GFF format
    :return: tuple (init_state, states, edges, acc_states) where
             `edges` maps (src, dst) -> set of label tuples
             (each label is the whitespace-split Label text)
    """
    # TODOopt: (boolean) simplify transitions labels
    root = ElementTree.fromstring(gff_xml)
    states = set()
    edges = dict()  # (src, dst) -> set of labels
    for transition in root.iter('Transition'):
        src = transition.find('From').text
        dst = transition.find('To').text
        label = transition.find('Label').text
        states.update((src, dst))
        edges.setdefault((src, dst), set()).add(tuple(label.split()))
    init_state = root.find('InitialStateSet').find('StateID').text
    acc_states = {elem.text for elem in root.find('Acc').iter('StateID')}
    return init_state, states, edges, acc_states
def ltl_2_automaton_gff(ltl: str) -> str:
    """Translate an LTL formula into GFF via GOAL's ltl2ba (NBW output)."""
    translator_options = "-m ltl2ba -t nbw"
    return execute_translation("QPTL", ltl, translator_options)
def pltl_2_automaton_gff(ltl: str) -> str:
    """Translate a past-LTL formula into GFF via GOAL's pltl2ba (NBW output)."""
    translator_options = "-m pltl2ba -t nbw"
    return execute_translation("QPTL", ltl, translator_options)
def check_correct_ore(omega_regex: str):
    """Validate an omega-regular expression for illegal tokens.

    Rejects 'not_' when preceded by a non-word character, and '_and_'
    anywhere.  (Note: 'not_' at the very start of the string is not
    caught by the `\\W`-prefixed pattern — preserved from the original.)

    :param omega_regex: the omega-regular expression text
    :raises AssertionError: if an illegal token is found

    NOTE(review): `assert` is stripped under `python -O`; raising
    ValueError would be safer, but is kept as `assert` here to preserve
    the exception type callers may rely on.
    """
    # raw strings so the \W regex escape is explicit (non-raw '\W' is a
    # DeprecationWarning/SyntaxWarning on modern Python)
    match = search(r"[\W]not_", omega_regex)
    assert not match, "\"%s\" contains illegal sequence 'not_'" % omega_regex
    match = search("_and_", omega_regex)
    assert not match, "\"%s\" contains illegal sequence '_and_'" % omega_regex
def ore_2_automaton_gff(omega_regex: str) -> str:
    """Translate an omega-regular expression into a GFF automaton description.

    Steps: validate the ORE, convert it to a plain regex, normalize
    'true'/'True'/'.' tokens to the single token 'True', translate with
    GOAL ("ORE" translator), switch the alphabet type from Classical to
    Propositional, and expand regex atoms into propositions.

    :param omega_regex: the omega-regular expression text
    :raises AssertionError: if the ORE contains illegal tokens
    """
    check_correct_ore(omega_regex)
    w_regex = to_regex(omega_regex)
    # raw strings: the patterns contain \W and \. which are invalid
    # escapes in non-raw literals on modern Python (same byte values)
    w_regex = sub(r"([\W])(?:[Tt]rue|\.)([\W])", r"\1True\2", w_regex)
    result = execute_translation("ORE", w_regex, "-se -sa")
    # GOAL emits a Classical alphabet; downstream expects Propositional
    result = sub("<Alphabet type=\"Classical\">",
                 "<Alphabet type=\"Propositional\">",
                 result)
    result = regex_to_proposition(result)
    return result