class LeftKanExtension:
    """Compute a left Kan extension by string rewriting.

    Given graphs A and B, relations R on B, a set-valued functor X on A
    and a functor F from A to B (all in the protocol of mod.Cat), this
    builds:

        self.K    -- the extension functor (objects of B -> sets of terms,
                     represented as RegularSet / catalogued lists)
        self.nat  -- the natural transformation from X to K composed with F

    Terms are encoded as '.'-delimited words over generated string tokens,
    the initial rewriting rules are completed with Knuth-Bendix, and the
    (possibly infinite) element sets are then described by solving a
    finite automaton into regular expressions.
    """

    # Implementation of algorithms described by Brown and Heyworth (ref.251)
    # and Heyworth (ref.253).

    def __init__(self, mod, A, B, R, X, F):
        # External subsystem dependencies
        # mod.KnuthBendix
        # mod.FiniteAutomaton
        # mod.SolveFSA
        # mod.Cat
        # mod.Cat.Function
        # mod.Cat.Functor
        # mod.Cat.check_graph
        # mod.Cat.check_rules

        self.mod = mod
        self.Cat = mod.Cat

        #

        # Validate the inputs before any computation.
        self.Cat.check_graph(A)
        self.Cat.check_graph(B)
        self.Cat.check_rules(R, B)

        #

        self.A = A
        self.B = B
        # Normalize each relation to a pair of tuples (hashable words).
        self.R = [(tuple(g), tuple(h)) for (g, h) in R]
        self.X = X
        self.F = F

        # The whole computation runs from the constructor; afterwards
        # self.K and self.nat are available.
        self.general_procedure()

    def general_procedure(self):
        # Overall pipeline; each step depends on the previous one.
        self.initialize_tables()
        self.make_confluent_system()
        self.make_automaton()  # self.make_catalogue()
        self.make_natural_transformation()

    def initialize_tables(self):
        # Bidirectional maps between objects/arrows and their string
        # tokens, used to present terms as words to the rewriting system.
        self.obj_to_str_table = {}
        self.str_to_obj_table = {}
        self.make_initial_rules()

    def make_initial_rules(self):
        """Build the initial rewriting rules self.Rinit = Re + Rk (as words).

        Re encodes the action of X along each arrow of A, translated
        through F; Rk encodes the user-supplied relations R.  While
        building Re, each image path F.fa(a) is checked to compose in B
        from F.fo(source(a)) to F.fo(target(a)); ValueError is raised
        otherwise.
        """

        # Algorithm 6.1 in (251)

        Re = []

        def add_rule(a, b):
            # Record a rule only when the two sides differ as words.
            aw = self.make_word(a)
            bw = self.make_word(b)
            if aw != bw:
                Re.append((aw, bw))

        for a in self.A.arrows:
            srca = self.A.source(a)
            tgta = self.A.target(a)
            XA = self.X.fo(srca)      # the set X assigns to the source of a
            Xa = self.X.fa(a)         # the function X assigns to arrow a
            Fa = tuple(self.F.fa(a))  # image of a in B: a path, may be empty
            Fsrca = self.F.fo(srca)
            Ftgta = self.F.fo(tgta)
            if Fa:
                # Check that Fa is a composable path starting at Fsrca ...
                t = Fsrca
                for b in Fa:
                    srcb = self.B.source(b)
                    if srcb != t:
                        raise ValueError(
                            'Arrow [%s] with source %s does not compose with target %s' % (b, srcb, t))
                    t = self.B.target(b)
                # ... and ending at Ftgta.
                if t != Ftgta:
                    raise ValueError(
                        'Arrow %s with target %s does not compose with %s' % (Fa, t, Ftgta))
            else:
                # An empty path acts as an identity; endpoints must agree.
                if Fsrca != Ftgta:
                    raise ValueError(
                        'Source %s does not match target %s' % (Fsrca, Ftgta))
            for x in XA:
                # Rule: ((srca, x),) followed by Fa  ->  ((tgta, Xa(x)),)
                add_rule(((srca, x),) + Fa, ((tgta, Xa(x)),))

        # The relations of B, in word form.
        Rk = [(self.make_word(x), self.make_word(y)) for (x, y) in self.R]

        self.Re = Re
        self.Rk = Rk
        self.Rinit = Re + Rk

    def make_confluent_system(self):
        # Complete the initial rules into a confluent rewriting system;
        # self.rs.reduce then yields unique normal forms.
        self.rs = self.mod.KnuthBendix(self.Rinit, delim='.')
        self.Rconf = self.rs.reductions

    def make_automaton(self):
        # Make nondeterministic finite automaton
        #
        # States are: the start state s0, generator terms xi, objects of
        # B, and proper prefixes of left-hand sides of the confluent
        # rules.  The accepted language (per final-state choice in
        # get_RS below) is the set of irreducible terms.

        def target(e):
            # Target object (in B) of term e: F of the object of a bare
            # generator element, else the target of the last arrow.
            if len(e) == 1 and isinstance(e[0], tuple):
                return self.F.fo(e[0][0])
            else:
                return self.B.target(e[-1])

        # Generator elements ((A, x),) for every x in X.fo(A).
        XA = []
        for A in self.A.objects:
            for x in self.X.fo(A):
                XA.append(((A, x),))

        # follows[B] = list of (arrow, target) leaving object B.
        follows = dict([(B, []) for B in self.B.objects])
        for b, (srcb, tgtb) in list(self.B.arrows.items()):
            follows[srcb].append((b, tgtb))

        # IR: left-hand sides (as terms) of the confluent rules;
        # membership means 'reducible as a whole term'.
        IR = dict([(self.make_term(u), self.make_term(v))
                   for u, v in self.Rconf])

        # pplR: set of proper prefixes of rule left-hand sides.
        pplR = {}
        for l, r in self.Rconf:
            t = self.make_term(l)
            for i in range(1, len(t)):
                pplR[t[:i]] = 1

        s0 = ('s0',)

        fsa = self.mod.FiniteAutomaton(s0)

        # Start transitions into the irreducible generator elements.
        for xi in XA:
            if xi not in IR:
                fsa.add_transition(s0, xi[0], xi)

        # Transitions out of generator states.
        for xi in XA:
            for b, tgtb in follows[target(xi)]:
                bterm = (b,)
                xib = xi + bterm
                if xib in pplR:
                    fsa.add_transition(xi, b, xib, tgtb)
                elif (bterm in pplR and xib not in IR):
                    fsa.add_transition(xi, b, bterm, tgtb)
                elif xib not in IR:
                    fsa.add_transition(xi, b, tgtb)

        # Transitions out of object states.
        for Bi in self.B.objects:
            for b, tgtb in follows[Bi]:
                bterm = (b,)
                if bterm in pplR:
                    fsa.add_transition(Bi, b, bterm, tgtb)
                elif bterm not in IR:
                    fsa.add_transition(Bi, b, tgtb)

        # Transitions out of prefix states.
        for u in pplR:
            if u in XA:
                continue  # already handled as a generator state above
            for b, tgtb in follows[target(u)]:
                bterm = (b,)
                ub = u + bterm
                if ub in pplR:
                    fsa.add_transition(u, b, ub, tgtb)
                elif self.irreducible(ub):  # ub not in IR:
                    fsa.add_transition(u, b, tgtb)

        def get_RS(Bi):
            # Regular set of irreducible terms whose target object is Bi;
            # computed lazily, once per requested object of B.
            finals = {}
            finals[Bi] = 1
            for xi in XA:
                if self.F.fo(xi[0][0]) == Bi:
                    finals[xi] = 1
            for u in pplR:
                if target(u) == Bi:
                    finals[u] = 1

            # A composite (subset) state is final iff all its component
            # states are final.
            for c in fsa.get_composites():
                for s in c:
                    if s not in finals:
                        break
                else:
                    finals[c] = 1

            dfa = fsa.get_minimized_dfa(finals)
            regexp = self.mod.SolveFSA(dfa)
            return RegularSet(regexp)

        KB = self.Cat.Function(get_RS, self.B.objects, None)

        # Arrow action: append the arrow and reduce; packaged per arrow
        # of B as a KanAction.
        Kb = self.Cat.Function(
            lambda a: KanAction(self.B, KB, a, target,
                                self.irreducible, self.reduce),
            self.B.arrows,
            KanAction,
        )

        self.KB = KB
        self.Kb = Kb
        self.K = self.Cat.Functor(KB, Kb)

    def make_catalogue(self):
        # Catalogue the elements of the sets pointed to by extension functor K,
        # according to algorithm described in 7.1 in (251).

        # Precondition:

        # Tables initialized and a confluent system created.
        # The system is assumed to be finite, otherwise we won't terminate.

        # Postcondition:

        # Functor self.K represented as:
        #
        # self.K.tabo = self.KB = dict mapping,
        #     source: {each B in self.B.objects}
        #     target: sets represented as lists
        # self.K.taba = self.Kb = dict, mapping
        #     source: {each a in self.B.arrows}
        #     target: tabulated function, mapping
        #         source: KB[source of a]
        #         target: KB[target of a]

        def target(e):
            # Target object in B of term e (generators map through F).
            if len(e) == 1:
                return self.F.fo(e[0][0])
            else:
                return self.B.target(e[-1])

        def add_element(e):
            # Keep only irreducible terms; a reducible term equals some
            # already-catalogued irreducible one.
            if self.irreducible(e):
                block.append(e)
                KB[target(e)].append(e)
            else:
                pass

        KB = dict([(B, []) for B in self.B.objects])
        block = []

        # Seed with the generator elements.
        for A in self.A.objects:
            for x in self.X.fo(A):
                add_element(((A, x),))

        # Breadth-first closure: extend each new element by every
        # composable arrow of B until no new irreducible element appears.
        while block:
            oblock = block
            block = []
            for e in oblock:
                tgt = target(e)
                for a in self.B.arrows:
                    if self.B.source(a) == tgt:
                        add_element(e + (a,))

        Kb = {}

        # Tabulate the arrow actions over the catalogued sets.
        for a in self.B.arrows:
            src = KB[self.B.source(a)]
            tgt = KB[self.B.target(a)]
            tab = dict([(s, self.reduce(s + (a,))) for s in src])
            Kb[a] = self.Cat.Function(tab, src, tgt)

        KB = self.Cat.Function(KB, self.B.objects, list(KB.values()))
        Kb = self.Cat.Function(Kb, self.B.arrows, list(Kb.values()))

        self.KB = KB
        self.Kb = Kb
        self.K = self.Cat.Functor(KB, Kb)

    def make_natural_transformation(self):

        # Precondition:
        #     initial tables should be initialized
        #     self.K.fo should exist

        # Postcondition:
        #
        # self.nat[A] for A in self.A.objects

        get_nat_memo = {}

        def get_nat(A):
            # Component of the natural transformation at object A,
            # memoized: maps x in X.fo(A) to the normal form of ((A, x),).
            if A in get_nat_memo:
                return get_nat_memo[A]

            src = self.X.fo(A)
            tgt = self.K.fo(self.F.fo(A))
            tab = dict([(x, self.reduce(((A, x),))) for x in src])
            get_nat_memo[A] = self.Cat.Function(tab, src, tgt)
            return get_nat_memo[A]

        self.nat = self.Cat.Function(get_nat, self.A.objects, None)

    def make_word(self, x):
        # Encode a term (tuple of objects) as a '.'-delimited word of
        # generated tokens; empty-string components are skipped.
        ots = self.obj_to_str
        return '.'.join([ots(e) for e in x if e != ''])

    def obj_to_str(self, x):
        # Return the string token for object x, generating and recording
        # a fresh numeric token on first use (inverse kept in
        # self.str_to_obj_table).
        otn = self.obj_to_str_table
        try:
            return otn[x]
        except KeyError:
            assert not (isinstance(x, tuple) and len(x) > 2)
            n = str(len(otn))
            #n = '%d:%s'%(len(otn), x)
            #n = str(x)
            otn[x] = n
            self.str_to_obj_table[n] = x
            return n

    def str_to_obj(self, x):
        # Inverse of obj_to_str for a single token.
        return self.str_to_obj_table[x]

    def irreducible(self, x):
        # A term is irreducible iff its word form is its own normal form.
        tx = self.make_word(x)
        return tx == self.rs.reduce(tx)

    def reduce(self, x):
        # Normal form of term x, returned as a term.
        w = self.rs.reduce(self.make_word(x))
        return self.make_term(w)

    def make_term(self, word):
        # Decode a '.'-delimited word back into a tuple of objects.
        sto = self.str_to_obj_table
        return tuple([sto[s] for s in word.split('.') if s])


class KanAction:
    """The action K.fa(a) of an arrow a of B on the extension sets:
    appends a to an irreducible term and reduces the result."""

    def __init__(self, B, KB, a, targetof, irreducible, reduce):
        srca = B.source(a)
        tgta = B.target(a)
        self.src = KB(srca)   # source set K.fo(source(a))
        self.tgt = KB(tgta)   # target set K.fo(target(a))
        self.a = a
        self.srca = srca
        self.targetof = targetof
        self.irreducible = irreducible
        self.reduce = reduce

    def __call__(self, s):
        # Domain checks: s must end at the source object of a, and must
        # be in normal form (the source set consists of irreducible
        # terms); otherwise TypeError.
        if self.targetof(s) != self.srca:
            raise TypeError('''\
Target of %r (= %r) does not match source of %r (= %r)''' % (
                s, self.targetof(s), self.a, self.srca))
        if not self.irreducible(s):
            raise TypeError('''\
Argument %r is reducible to %r; and is thus not in the source set K.fo(%r)''' % (
                s, self.reduce(s), self.srca))
        return self.reduce(s + (self.a,))


class RegularSet:
    # Wraps a regular expression;
    # provides a set protocol for the underlying set of sequences:
    # o If the RE specifies a finite language, iteration over its strings
    # [ o set inclusion ]

    # Class-level default; simplify() sets an instance-level 1 once the
    # wrapped RE has been simplified.
    is_simplified = 0

    def __init__(self, re):
        # re: a regular-expression object from the SolveFSA subsystem
        # (must provide simplified(), sequni(), limited()).
        self.re = re

    def __iter__(self):
        return iter(self.uniform)

    def __getitem__(self, x):
        return self.uniform[x]

    def __len__(self):
        return len(self.uniform)

    def get_xs_covered(self, coverage):
        # Sequences of the language, limited to the given coverage,
        # each returned as a tuple.
        N = coverage  # NOTE(review): unused local; kept unchanged
        X = self.re.limited(coverage)
        xs = X.sequni()
        return [tuple(x) for x in xs]

    def get_uniform(self):
        # Sequences of the (assumed finite) language of the simplified RE.
        self.simplify()
        return self.re.sequni()

    uniform = property(fget=get_uniform)

    def simplify(self):
        # Simplify the wrapped RE at most once per instance.
        if not self.is_simplified:
            self.re = self.re.simplified()
            self.is_simplified = 1


class ObjectTester:
    """Tests that a value conforms to one object of a category, by running
    the intermediate test code generated by CategoryTester."""

    def __init__(self, category_tester, object, code):
        self.category_tester = category_tester
        self.functor = category_tester.functor
        self.object = object
        self.code = code

    def get_all_arrows(self):
        # Arrow paths listed for this object by the category tester.
        return self.category_tester.arrows[self.object]

    def get_intermediate_test_code(self):
        return self.code

    def get_python_test_source_code(self):
        # Render the intermediate code as Python source text
        # (one statement per instruction).
        cmap = {
            'aseq': 'assert e[%r] == e[%r]',
            'evalfa': 'e[%r] = fa[%r](e[%r])',
            'asfo': 'assert fo[%r](e[%r])'
        }

        return '\n'.join([cmap[c[0]] % c[1:] for c in self.code])

    def execode(self, arg):
        # Run the generated Python source with exec (alternative to
        # intercode); returns the environment dict of computed values.
        code = self.get_python_test_source_code()

        e = {'arg': arg}
        d = {'fa': self.functor.fa,
             'fo': self.functor.fo,
             'e': e,
             }
        exec(code, d)
        return e

    def intercode(self, arg):
        # Interpret the intermediate code directly.  Instructions:
        #   ('evalfa', dst, ar, src) -- e[dst] = fa[ar](e[src])
        #   ('asfo', ob, src)        -- predicate fo[ob] must hold on e[src]
        #   ('aseq', na, nb)         -- e[na] and e[nb] must be equal
        e = {'arg': arg}
        fa = self.functor.fa
        fo = self.functor.fo
        for c in self.code:
            a = c[0]
            if a == 'evalfa':
                dst, ar, src = c[1:]
                e[dst] = fa[ar](e[src])
            elif a == 'asfo':
                ob, src = c[1:]
                if not fo[ob](e[src]):
                    raise ValueError('Predicate failed')
            elif a == 'aseq':
                na, nb = c[1:]
                if e[na] != e[nb]:
                    raise ValueError('e[%r] != e[%r]' % (na, nb))
            else:
                raise ValueError('Invalid code: %r' % (a,))

    def test(self, arg):
        # Default test strategy: direct interpretation.
        return self.intercode(arg)


class CategoryTester:
    """Generates and runs conformance tests for values at the objects of
    a category, given a functor (providing per-object predicates fo and
    per-arrow functions fa) and, per object, the arrow paths to exercise."""

    def __init__(self, mod, functor, arrows, get_arrow_name=None):
        self.mod = mod
        self.cat = functor.src
        self.functor = functor
        self.arrows = arrows
        if get_arrow_name is not None:
            # Optional override of the default arrow-path naming scheme.
            self.get_arrow_name = get_arrow_name

    def get_arrow_name(self, a):
        # Default name of an arrow path: its components joined by '.'.
        return '.'.join(a)

    def get_eval_arrows_code(self, object, argname):
        # Code that evaluates every listed arrow path from `object`,
        # memoizing shared prefixes so each arrow application is emitted
        # once.  Returns (codes, memolist) where memolist pairs each path
        # with the name of its computed value.
        fa = self.functor.fa

        name = argname
        memo = {(): name}
        memolist = [((), name)]

        codes = []

        def eval_arrow(a):
            if a in memo:
                return memo[a]
            a0 = a[:-1]
            a1 = a[-1]
            name = self.get_arrow_name(a)
            na0 = eval_arrow(a0)
            #codes.append('%s = fa[%r](%s)'%(name, a1, na0))
            codes.append(('evalfa', name, a1, na0))
            memo[a] = name
            memolist.append((a, name))
            return name

        for ar in self.arrows[object]:
            eval_arrow(ar)

        return codes, memolist

    def get_object_tester(self, object):
        code = self.get_test_object_code(object)
        return ObjectTester(self, object, code)

    def get_test_inclusion_code(self, object, ml):
        # Code asserting that each evaluated value satisfies the
        # predicate of the object its path lands on (skipping objects
        # outside the functor's object map, when one is declared).
        codes = []
        src = self.functor.fo.src
        for arrow, value in ml:
            ob = object
            if arrow:
                ob = self.cat.graph.target(arrow[-1])
            #codes.append('assert fo[%r](%s)'%(ob, value))
            if src is None or ob in src:
                codes.append(('asfo', ob, value))
        return codes

    def get_test_object_code(self, object):
        # Full test program: evaluate arrows, then check relations,
        # then check inclusions.
        argname = 'arg'
        evalcodes, memolist = self.get_eval_arrows_code(object, argname)

        relcodes = self.get_test_relations_code(object, memolist)

        incodes = self.get_test_inclusion_code(object, memolist)

        return evalcodes+relcodes+incodes

    def get_test_relations_code(self, object, memolist):
        codes = []
        cat = self.cat
        fa = self.functor.fa
        memo = dict(memolist)

        def teval_arrow(ar):
            # Like eval_arrow, but extends the memo on demand with the
            # relation-induced paths.
            if ar in memo:
                return memo[ar]
            a0 = teval_arrow(ar[:-1])
            name = self.get_arrow_name(ar)
            #codes.append('%s = fa[%r](%s)'%(name, ar[-1], a0))
            codes.append(('evalfa', name, ar[-1], a0))
            memo[ar] = name
            return name

        # Check that the equality relations really match up
        # for all arrows in old memolist, i.e. original unique arrows
        # which is arguably overkill sometimes?..
        for a, b in cat.relations:
            a = tuple(a)
            b = tuple(b)
            src = cat.graph.source(a[0])
            for (arr, val) in memolist:
                if arr:
                    tgt = cat.graph.target(arr[-1])
                else:
                    tgt = object
                if src == tgt:
                    ara = arr + a
                    arb = arr + b
                    if ara != arb:
                        va = teval_arrow(ara)
                        vb = teval_arrow(arb)
                        # Distinct paths must have distinct value names;
                        # equality of the values is asserted at test time.
                        assert va != vb
                        #codes.append('assert %s == %s'%(va, vb))
                        codes.append(('aseq', va, vb))
        return codes

    def test_object(self, object, value):
        # Build and immediately run the tester for one object.
        tester = self.get_object_tester(object)
        tester.test(value)
        return tester

    def test_object_fail(self, object, value):
        # Assert that testing `value` at `object` fails; any exception
        # counts as the expected failure.
        # NOTE(review): 'excepted' in the message below looks like a typo
        # for 'expected'; left unchanged (runtime string).
        try:
            self.test_object(object, value)
        except Exception:
            pass
        else:
            raise Exception('Exception excepted')


class _GLUECLAMP_:
    # 'imports'
    # Lazy attribute resolution in the guppy glue style: each _get_X
    # method supplies attribute X on first access via self._parent.

    def _get_KnuthBendix(self): return self._parent.KnuthBendix.KnuthBendix
    def _get_FiniteAutomaton(self): return self._parent.FSA.FiniteAutomaton
    def _get_SolveFSA(self): return self._parent.RE.SolveFSA
    def _get_Cat(self): return self._parent.Cat

    # Main exported interface is the lke method
    # which provides a context for the LeftKanExtension class.

    def lke(self, A, B, R, X, F):
        # Construct a LeftKanExtension with this module as its context.
        return LeftKanExtension(self, A, B, R, X, F)

    # Other functions - examples of applications of Kan extension
    # in alphabetic order

    def arrows_map(self, cat, from_objects=0, coverage=1):
        # Map each object of cat to a list of arrow paths from it (or,
        # with from_objects set, to it -- via the dual category), up to
        # the requested coverage.
        if from_objects:
            cat = cat.get_dual()

        # Kan setup: discrete A over the same objects, trivial X, and F
        # the identity-on-objects inclusion into cat's graph.
        A = self.Cat.Graph(cat.graph.objects, [])
        B = cat.graph
        R = cat.relations
        X = self.Cat.Functor(lambda x: [1], lambda x: lambda y: y)
        F = self.Cat.Functor(lambda x: x, lambda x: [])
        ke = self.lke(A, B, R, X, F)

        memo = {}

        def get_arrows(object):
            if object in memo:
                return memo[object]
            re = ke.K.fo[object].re.rempretup()
            if from_objects:
                re = re.reversed()
            if str(coverage).startswith('length'):
                # coverage of the form 'length<N>': enumerate all words
                # of lengths 1..N.
                maxlen = int(coverage[6:])
                ar = []
                xs = re.get_words_memo()
                for i in range(1, maxlen+1):
                    ar.extend([tuple(x) for x in xs.get_words_of_length(i)])
            else:
                # Numeric coverage: limit the RE before enumerating.
                re = re.limited(coverage)
                xs = re.sequni()
                ar = [tuple(x) for x in xs]
            memo[object] = ar
            return ar

        return self.Cat.Function(
            get_arrows,
            src=ke.K.fo.src,
            tgt=None
        )

    def category_tester(self, functor, arrows=None, coverage=1):
        # Convenience constructor for CategoryTester.  Accepts either a
        # Functor or an (fo, fa, src) tuple; a None fo defaults to an
        # always-true predicate for every object.
        if isinstance(functor, tuple):
            fo, fa, src = functor
            if fo is None:
                def fo(x): return lambda y: 1
            functor = self.Cat.Functor(fo, fa, src)
        if arrows is None:
            arrows = self.arrows_map(
                functor.src, from_objects=1, coverage=coverage)
        return CategoryTester(self, functor, arrows)

    def coequalizer(self, S0, S1, f0, f1):
        # Given
        #
        # S0, S1 sets (objects that can be iterated over)
        # f0, f1 functions from S0 to S1
        #
        # Return a coequalizing function,
        # such that in the following diagram:

        #
        # S0 ===== S0
        # |         |
        # | f0      | f1
        # |         |
        # V         V
        # S1 ===== S1 ==== coequalizing_function.src
        #           |
        #           | coequalizing_function
        #           |
        #           V
        #          coequalizing_function.tgt

        # both paths from S0 to coequalizing_function.tgt will be equivalent,
        # and coequalizing_function.tgt is a colimit of all such sets.
        #
        # The coequalizing_function object is callable with
        # an argument from S1, and has the following attributes:
        # .src is identical to S1
        # .tgt is a set in iterable form
        # .asdict() returns a dict representing the mapping

        # Encode the parallel pair f0, f1 as a two-object graph with two
        # parallel arrows, then take its colimit.
        objects = [0, 1]
        arrows = {'a0': (0, 1), 'a1': (0, 1)}
        A = self.Cat.Graph(objects, arrows)

        Xo = self.Cat.Function({0: S0, 1: S1}, objects, [S0, S1])
        Xa = self.Cat.Function({'a0': f0, 'a1': f1}, arrows, [f0, f1])
        X = self.Cat.Functor(Xo, Xa)

        colimit_object, colimit_functions = self.colimit(A, X)
        # The component at object 1 (i.e. S1) is the coequalizer map.
        return colimit_functions[1]

    def colimit(self, A, X):
        # According to 9.6 in (ref.251)
        # Colimit of X over A = left Kan extension along the functor to
        # the one-object, arrowless graph.

        B = self.Cat.Graph([0], {})
        R = []
        F = self.Cat.Functor(lambda x: 0, lambda x: ())

        lka = self.lke(A, B, R, X, F)

        colimit_object = lka.KB[0]
        colimit_functions = lka.nat

        # Reduce elements to a smaller (but isomorphic) form
        # I.E since elements are all of the form
        # ((A, X),)
        # they can be reduced to the form
        # (A, X)
        #

        colimit_object = [x[0] for x in colimit_object]

        colimit_functions = dict([
            (A, self.Cat.Function(
                dict([(a, k[0])
                      for (a, k) in list(cof.items())]),
                cof.src,
                colimit_object,
            )
            )
            for (A, cof) in list(colimit_functions.items())])

        return colimit_object, colimit_functions

    def test_arrows(self, functor, object, value):
        # Application of arrow listing to test sequencing
        # Discussed in Notes Mar 9 2005

        tester = self.category_tester(functor)
        return tester.test_object(object, value)